1 /* $NetBSD: pmap.c,v 1.95 2018/01/27 23:07:36 chs Exp $ */ 2 /*- 3 * Copyright (c) 2001 The NetBSD Foundation, Inc. 4 * All rights reserved. 5 * 6 * This code is derived from software contributed to The NetBSD Foundation 7 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc. 8 * 9 * Support for PPC64 Bridge mode added by Sanjay Lal <sanjayl@kymasys.com> 10 * of Kyma Systems LLC. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 23 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 24 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 * POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 /* 35 * Copyright (C) 1995, 1996 Wolfgang Solfrank. 36 * Copyright (C) 1995, 1996 TooLs GmbH. 37 * All rights reserved. 38 * 39 * Redistribution and use in source and binary forms, with or without 40 * modification, are permitted provided that the following conditions 41 * are met: 42 * 1. Redistributions of source code must retain the above copyright 43 * notice, this list of conditions and the following disclaimer. 44 * 2. Redistributions in binary form must reproduce the above copyright 45 * notice, this list of conditions and the following disclaimer in the 46 * documentation and/or other materials provided with the distribution. 47 * 3. All advertising materials mentioning features or use of this software 48 * must display the following acknowledgement: 49 * This product includes software developed by TooLs GmbH. 50 * 4. The name of TooLs GmbH may not be used to endorse or promote products 51 * derived from this software without specific prior written permission. 52 * 53 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR 54 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 55 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 56 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 58 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 59 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 60 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 61 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 62 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
63 */ 64 65 #include <sys/cdefs.h> 66 __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.95 2018/01/27 23:07:36 chs Exp $"); 67 68 #define PMAP_NOOPNAMES 69 70 #include "opt_ppcarch.h" 71 #include "opt_altivec.h" 72 #include "opt_multiprocessor.h" 73 #include "opt_pmap.h" 74 75 #include <sys/param.h> 76 #include <sys/proc.h> 77 #include <sys/pool.h> 78 #include <sys/queue.h> 79 #include <sys/device.h> /* for evcnt */ 80 #include <sys/systm.h> 81 #include <sys/atomic.h> 82 83 #include <uvm/uvm.h> 84 #include <uvm/uvm_physseg.h> 85 86 #include <machine/powerpc.h> 87 #include <powerpc/bat.h> 88 #include <powerpc/pcb.h> 89 #include <powerpc/psl.h> 90 #include <powerpc/spr.h> 91 #include <powerpc/oea/spr.h> 92 #include <powerpc/oea/sr_601.h> 93 94 #ifdef ALTIVEC 95 extern int pmap_use_altivec; 96 #endif 97 98 #ifdef PMAP_MEMLIMIT 99 static paddr_t pmap_memlimit = PMAP_MEMLIMIT; 100 #else 101 static paddr_t pmap_memlimit = -PAGE_SIZE; /* there is no limit */ 102 #endif 103 104 extern struct pmap kernel_pmap_; 105 static unsigned int pmap_pages_stolen; 106 static u_long pmap_pte_valid; 107 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 108 static u_long pmap_pvo_enter_depth; 109 static u_long pmap_pvo_remove_depth; 110 #endif 111 112 #ifndef MSGBUFADDR 113 extern paddr_t msgbuf_paddr; 114 #endif 115 116 static struct mem_region *mem, *avail; 117 static u_int mem_cnt, avail_cnt; 118 119 #if !defined(PMAP_OEA64) && !defined(PMAP_OEA64_BRIDGE) 120 # define PMAP_OEA 1 121 #endif 122 123 #if defined(PMAP_OEA) 124 #define _PRIxpte "lx" 125 #else 126 #define _PRIxpte PRIx64 127 #endif 128 #define _PRIxpa "lx" 129 #define _PRIxva "lx" 130 #define _PRIsr "lx" 131 132 #ifdef PMAP_NEEDS_FIXUP 133 #if defined(PMAP_OEA) 134 #define PMAPNAME(name) pmap32_##name 135 #elif defined(PMAP_OEA64) 136 #define PMAPNAME(name) pmap64_##name 137 #elif defined(PMAP_OEA64_BRIDGE) 138 #define PMAPNAME(name) pmap64bridge_##name 139 #else 140 #error unknown variant for pmap 141 #endif 142 #endif /* PMAP_NEEDS_FIXUP */ 143 144 #ifdef PMAPNAME 145 #define STATIC static 146 #define pmap_pte_spill PMAPNAME(pte_spill) 147 #define pmap_real_memory PMAPNAME(real_memory) 148 #define pmap_init PMAPNAME(init) 149 #define pmap_virtual_space PMAPNAME(virtual_space) 150 #define pmap_create PMAPNAME(create) 151 #define pmap_reference PMAPNAME(reference) 152 #define pmap_destroy PMAPNAME(destroy) 153 #define pmap_copy PMAPNAME(copy) 154 #define pmap_update PMAPNAME(update) 155 #define pmap_enter PMAPNAME(enter) 156 #define pmap_remove PMAPNAME(remove) 157 #define pmap_kenter_pa PMAPNAME(kenter_pa) 158 #define pmap_kremove PMAPNAME(kremove) 159 #define pmap_extract PMAPNAME(extract) 160 #define pmap_protect PMAPNAME(protect) 161 #define pmap_unwire PMAPNAME(unwire) 162 #define pmap_page_protect PMAPNAME(page_protect) 163 #define pmap_query_bit PMAPNAME(query_bit) 164 #define pmap_clear_bit PMAPNAME(clear_bit) 165 166 #define pmap_activate PMAPNAME(activate) 167 #define pmap_deactivate PMAPNAME(deactivate) 168 169 #define pmap_pinit PMAPNAME(pinit) 170 #define pmap_procwr PMAPNAME(procwr) 171 172 #define pmap_pool PMAPNAME(pool) 173 #define pmap_upvo_pool PMAPNAME(upvo_pool) 174 #define pmap_mpvo_pool PMAPNAME(mpvo_pool) 175 #define pmap_pvo_table PMAPNAME(pvo_table) 176 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB) 177 #define pmap_pte_print PMAPNAME(pte_print) 178 #define pmap_pteg_check PMAPNAME(pteg_check) 179 #define pmap_print_mmruregs PMAPNAME(print_mmuregs) 180 #define pmap_print_pte PMAPNAME(print_pte) 181 #define 
pmap_pteg_dist PMAPNAME(pteg_dist) 182 #endif 183 #if defined(DEBUG) || defined(PMAPCHECK) 184 #define pmap_pvo_verify PMAPNAME(pvo_verify) 185 #define pmapcheck PMAPNAME(check) 186 #endif 187 #if defined(DEBUG) || defined(PMAPDEBUG) 188 #define pmapdebug PMAPNAME(debug) 189 #endif 190 #define pmap_steal_memory PMAPNAME(steal_memory) 191 #define pmap_bootstrap PMAPNAME(bootstrap) 192 #else 193 #define STATIC /* nothing */ 194 #endif /* PMAPNAME */ 195 196 STATIC int pmap_pte_spill(struct pmap *, vaddr_t, bool); 197 STATIC void pmap_real_memory(paddr_t *, psize_t *); 198 STATIC void pmap_init(void); 199 STATIC void pmap_virtual_space(vaddr_t *, vaddr_t *); 200 STATIC pmap_t pmap_create(void); 201 STATIC void pmap_reference(pmap_t); 202 STATIC void pmap_destroy(pmap_t); 203 STATIC void pmap_copy(pmap_t, pmap_t, vaddr_t, vsize_t, vaddr_t); 204 STATIC void pmap_update(pmap_t); 205 STATIC int pmap_enter(pmap_t, vaddr_t, paddr_t, vm_prot_t, u_int); 206 STATIC void pmap_remove(pmap_t, vaddr_t, vaddr_t); 207 STATIC void pmap_kenter_pa(vaddr_t, paddr_t, vm_prot_t, u_int); 208 STATIC void pmap_kremove(vaddr_t, vsize_t); 209 STATIC bool pmap_extract(pmap_t, vaddr_t, paddr_t *); 210 211 STATIC void pmap_protect(pmap_t, vaddr_t, vaddr_t, vm_prot_t); 212 STATIC void pmap_unwire(pmap_t, vaddr_t); 213 STATIC void pmap_page_protect(struct vm_page *, vm_prot_t); 214 STATIC bool pmap_query_bit(struct vm_page *, int); 215 STATIC bool pmap_clear_bit(struct vm_page *, int); 216 217 STATIC void pmap_activate(struct lwp *); 218 STATIC void pmap_deactivate(struct lwp *); 219 220 STATIC void pmap_pinit(pmap_t pm); 221 STATIC void pmap_procwr(struct proc *, vaddr_t, size_t); 222 223 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB) 224 STATIC void pmap_pte_print(volatile struct pte *); 225 STATIC void pmap_pteg_check(void); 226 STATIC void pmap_print_mmuregs(void); 227 STATIC void pmap_print_pte(pmap_t, vaddr_t); 228 STATIC void pmap_pteg_dist(void); 229 #endif 230 #if defined(DEBUG) || defined(PMAPCHECK) 231 STATIC void pmap_pvo_verify(void); 232 #endif 233 STATIC vaddr_t pmap_steal_memory(vsize_t, vaddr_t *, vaddr_t *); 234 STATIC void pmap_bootstrap(paddr_t, paddr_t); 235 236 #ifdef PMAPNAME 237 const struct pmap_ops PMAPNAME(ops) = { 238 .pmapop_pte_spill = pmap_pte_spill, 239 .pmapop_real_memory = pmap_real_memory, 240 .pmapop_init = pmap_init, 241 .pmapop_virtual_space = pmap_virtual_space, 242 .pmapop_create = pmap_create, 243 .pmapop_reference = pmap_reference, 244 .pmapop_destroy = pmap_destroy, 245 .pmapop_copy = pmap_copy, 246 .pmapop_update = pmap_update, 247 .pmapop_enter = pmap_enter, 248 .pmapop_remove = pmap_remove, 249 .pmapop_kenter_pa = pmap_kenter_pa, 250 .pmapop_kremove = pmap_kremove, 251 .pmapop_extract = pmap_extract, 252 .pmapop_protect = pmap_protect, 253 .pmapop_unwire = pmap_unwire, 254 .pmapop_page_protect = pmap_page_protect, 255 .pmapop_query_bit = pmap_query_bit, 256 .pmapop_clear_bit = pmap_clear_bit, 257 .pmapop_activate = pmap_activate, 258 .pmapop_deactivate = pmap_deactivate, 259 .pmapop_pinit = pmap_pinit, 260 .pmapop_procwr = pmap_procwr, 261 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB) 262 .pmapop_pte_print = pmap_pte_print, 263 .pmapop_pteg_check = pmap_pteg_check, 264 .pmapop_print_mmuregs = pmap_print_mmuregs, 265 .pmapop_print_pte = pmap_print_pte, 266 .pmapop_pteg_dist = pmap_pteg_dist, 267 #else 268 .pmapop_pte_print = NULL, 269 .pmapop_pteg_check = NULL, 270 .pmapop_print_mmuregs = NULL, 271 .pmapop_print_pte = NULL, 272 .pmapop_pteg_dist = NULL, 273 
#endif 274 #if defined(DEBUG) || defined(PMAPCHECK) 275 .pmapop_pvo_verify = pmap_pvo_verify, 276 #else 277 .pmapop_pvo_verify = NULL, 278 #endif 279 .pmapop_steal_memory = pmap_steal_memory, 280 .pmapop_bootstrap = pmap_bootstrap, 281 }; 282 #endif /* !PMAPNAME */ 283 284 /* 285 * The following structure is aligned to 32 bytes 286 */ 287 struct pvo_entry { 288 LIST_ENTRY(pvo_entry) pvo_vlink; /* Link to common virt page */ 289 TAILQ_ENTRY(pvo_entry) pvo_olink; /* Link to overflow entry */ 290 struct pte pvo_pte; /* Prebuilt PTE */ 291 pmap_t pvo_pmap; /* ptr to owning pmap */ 292 vaddr_t pvo_vaddr; /* VA of entry */ 293 #define PVO_PTEGIDX_MASK 0x0007 /* which PTEG slot */ 294 #define PVO_PTEGIDX_VALID 0x0008 /* slot is valid */ 295 #define PVO_WIRED 0x0010 /* PVO entry is wired */ 296 #define PVO_MANAGED 0x0020 /* PVO e. for managed page */ 297 #define PVO_EXECUTABLE 0x0040 /* PVO e. for executable page */ 298 #define PVO_WIRED_P(pvo) ((pvo)->pvo_vaddr & PVO_WIRED) 299 #define PVO_MANAGED_P(pvo) ((pvo)->pvo_vaddr & PVO_MANAGED) 300 #define PVO_EXECUTABLE_P(pvo) ((pvo)->pvo_vaddr & PVO_EXECUTABLE) 301 #define PVO_ENTER_INSERT 0 /* PVO has been removed */ 302 #define PVO_SPILL_UNSET 1 /* PVO has been evicted */ 303 #define PVO_SPILL_SET 2 /* PVO has been spilled */ 304 #define PVO_SPILL_INSERT 3 /* PVO has been inserted */ 305 #define PVO_PMAP_PAGE_PROTECT 4 /* PVO has changed */ 306 #define PVO_PMAP_PROTECT 5 /* PVO has changed */ 307 #define PVO_REMOVE 6 /* PVO has been removed */ 308 #define PVO_WHERE_MASK 15 309 #define PVO_WHERE_SHFT 8 310 } __attribute__ ((aligned (32))); 311 #define PVO_VADDR(pvo) ((pvo)->pvo_vaddr & ~ADDR_POFF) 312 #define PVO_PTEGIDX_GET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK) 313 #define PVO_PTEGIDX_ISSET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID) 314 #define PVO_PTEGIDX_CLR(pvo) \ 315 ((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK))) 316 #define PVO_PTEGIDX_SET(pvo,i) \ 317 ((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID)) 318 #define PVO_WHERE(pvo,w) \ 319 ((pvo)->pvo_vaddr &= ~(PVO_WHERE_MASK << PVO_WHERE_SHFT), \ 320 (pvo)->pvo_vaddr |= ((PVO_ ## w) << PVO_WHERE_SHFT)) 321 322 TAILQ_HEAD(pvo_tqhead, pvo_entry); 323 struct pvo_tqhead *pmap_pvo_table; /* pvo entries by ptegroup index */ 324 static struct pvo_head pmap_pvo_kunmanaged = LIST_HEAD_INITIALIZER(pmap_pvo_kunmanaged); /* list of unmanaged pages */ 325 static struct pvo_head pmap_pvo_unmanaged = LIST_HEAD_INITIALIZER(pmap_pvo_unmanaged); /* list of unmanaged pages */ 326 327 struct pool pmap_pool; /* pool for pmap structures */ 328 struct pool pmap_upvo_pool; /* pool for pvo entries for unmanaged pages */ 329 struct pool pmap_mpvo_pool; /* pool for pvo entries for managed pages */ 330 331 /* 332 * We keep a cache of unmanaged pages to be used for pvo entries for 333 * unmanaged pages. 
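 *
 * A minimal sketch (assuming only the SIMPLEQ bookkeeping declared just
 * below) of what an allocation from such a cache looks like; the real
 * routines are pmap_pool_ualloc()/pmap_pool_ufree() later in this file:
 *
 *	struct pvo_page *pvop = SIMPLEQ_FIRST(&pmap_upvop_head);
 *	if (pvop != NULL) {
 *		SIMPLEQ_REMOVE_HEAD(&pmap_upvop_head, pvop_link);
 *		pmap_upvop_free--;
 *		return pvop;
 *	}
 *
 * and only if the cache is empty does the allocator go get a fresh page.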
334 */ 335 struct pvo_page { 336 SIMPLEQ_ENTRY(pvo_page) pvop_link; 337 }; 338 SIMPLEQ_HEAD(pvop_head, pvo_page); 339 static struct pvop_head pmap_upvop_head = SIMPLEQ_HEAD_INITIALIZER(pmap_upvop_head); 340 static struct pvop_head pmap_mpvop_head = SIMPLEQ_HEAD_INITIALIZER(pmap_mpvop_head); 341 static u_long pmap_upvop_free; 342 static u_long pmap_upvop_maxfree; 343 static u_long pmap_mpvop_free; 344 static u_long pmap_mpvop_maxfree; 345 346 static void *pmap_pool_ualloc(struct pool *, int); 347 static void *pmap_pool_malloc(struct pool *, int); 348 349 static void pmap_pool_ufree(struct pool *, void *); 350 static void pmap_pool_mfree(struct pool *, void *); 351 352 static struct pool_allocator pmap_pool_mallocator = { 353 .pa_alloc = pmap_pool_malloc, 354 .pa_free = pmap_pool_mfree, 355 .pa_pagesz = 0, 356 }; 357 358 static struct pool_allocator pmap_pool_uallocator = { 359 .pa_alloc = pmap_pool_ualloc, 360 .pa_free = pmap_pool_ufree, 361 .pa_pagesz = 0, 362 }; 363 364 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB) 365 void pmap_pte_print(volatile struct pte *); 366 void pmap_pteg_check(void); 367 void pmap_pteg_dist(void); 368 void pmap_print_pte(pmap_t, vaddr_t); 369 void pmap_print_mmuregs(void); 370 #endif 371 372 #if defined(DEBUG) || defined(PMAPCHECK) 373 #ifdef PMAPCHECK 374 int pmapcheck = 1; 375 #else 376 int pmapcheck = 0; 377 #endif 378 void pmap_pvo_verify(void); 379 static void pmap_pvo_check(const struct pvo_entry *); 380 #define PMAP_PVO_CHECK(pvo) \ 381 do { \ 382 if (pmapcheck) \ 383 pmap_pvo_check(pvo); \ 384 } while (0) 385 #else 386 #define PMAP_PVO_CHECK(pvo) do { } while (/*CONSTCOND*/0) 387 #endif 388 static int pmap_pte_insert(int, struct pte *); 389 static int pmap_pvo_enter(pmap_t, struct pool *, struct pvo_head *, 390 vaddr_t, paddr_t, register_t, int); 391 static void pmap_pvo_remove(struct pvo_entry *, int, struct pvo_head *); 392 static void pmap_pvo_free(struct pvo_entry *); 393 static void pmap_pvo_free_list(struct pvo_head *); 394 static struct pvo_entry *pmap_pvo_find_va(pmap_t, vaddr_t, int *); 395 static volatile struct pte *pmap_pvo_to_pte(const struct pvo_entry *, int); 396 static struct pvo_entry *pmap_pvo_reclaim(struct pmap *); 397 static void pvo_set_exec(struct pvo_entry *); 398 static void pvo_clear_exec(struct pvo_entry *); 399 400 static void tlbia(void); 401 402 static void pmap_release(pmap_t); 403 static paddr_t pmap_boot_find_memory(psize_t, psize_t, int); 404 405 static uint32_t pmap_pvo_reclaim_nextidx; 406 #ifdef DEBUG 407 static int pmap_pvo_reclaim_debugctr; 408 #endif 409 410 #define VSID_NBPW (sizeof(uint32_t) * 8) 411 static uint32_t pmap_vsid_bitmap[NPMAPS / VSID_NBPW]; 412 413 static int pmap_initialized; 414 415 #if defined(DEBUG) || defined(PMAPDEBUG) 416 #define PMAPDEBUG_BOOT 0x0001 417 #define PMAPDEBUG_PTE 0x0002 418 #define PMAPDEBUG_EXEC 0x0008 419 #define PMAPDEBUG_PVOENTER 0x0010 420 #define PMAPDEBUG_PVOREMOVE 0x0020 421 #define PMAPDEBUG_ACTIVATE 0x0100 422 #define PMAPDEBUG_CREATE 0x0200 423 #define PMAPDEBUG_ENTER 0x1000 424 #define PMAPDEBUG_KENTER 0x2000 425 #define PMAPDEBUG_KREMOVE 0x4000 426 #define PMAPDEBUG_REMOVE 0x8000 427 428 unsigned int pmapdebug = 0; 429 430 # define DPRINTF(x, ...) printf(x, __VA_ARGS__) 431 # define DPRINTFN(n, x, ...) do if (pmapdebug & PMAPDEBUG_ ## n) printf(x, __VA_ARGS__); while (0) 432 #else 433 # define DPRINTF(x, ...) do { } while (0) 434 # define DPRINTFN(n, x, ...) 
do { } while (0) 435 #endif 436 437 438 #ifdef PMAPCOUNTERS 439 /* 440 * From pmap_subr.c 441 */ 442 extern struct evcnt pmap_evcnt_mappings; 443 extern struct evcnt pmap_evcnt_unmappings; 444 445 extern struct evcnt pmap_evcnt_kernel_mappings; 446 extern struct evcnt pmap_evcnt_kernel_unmappings; 447 448 extern struct evcnt pmap_evcnt_mappings_replaced; 449 450 extern struct evcnt pmap_evcnt_exec_mappings; 451 extern struct evcnt pmap_evcnt_exec_cached; 452 453 extern struct evcnt pmap_evcnt_exec_synced; 454 extern struct evcnt pmap_evcnt_exec_synced_clear_modify; 455 extern struct evcnt pmap_evcnt_exec_synced_pvo_remove; 456 457 extern struct evcnt pmap_evcnt_exec_uncached_page_protect; 458 extern struct evcnt pmap_evcnt_exec_uncached_clear_modify; 459 extern struct evcnt pmap_evcnt_exec_uncached_zero_page; 460 extern struct evcnt pmap_evcnt_exec_uncached_copy_page; 461 extern struct evcnt pmap_evcnt_exec_uncached_pvo_remove; 462 463 extern struct evcnt pmap_evcnt_updates; 464 extern struct evcnt pmap_evcnt_collects; 465 extern struct evcnt pmap_evcnt_copies; 466 467 extern struct evcnt pmap_evcnt_ptes_spilled; 468 extern struct evcnt pmap_evcnt_ptes_unspilled; 469 extern struct evcnt pmap_evcnt_ptes_evicted; 470 471 extern struct evcnt pmap_evcnt_ptes_primary[8]; 472 extern struct evcnt pmap_evcnt_ptes_secondary[8]; 473 extern struct evcnt pmap_evcnt_ptes_removed; 474 extern struct evcnt pmap_evcnt_ptes_changed; 475 extern struct evcnt pmap_evcnt_pvos_reclaimed; 476 extern struct evcnt pmap_evcnt_pvos_failed; 477 478 extern struct evcnt pmap_evcnt_zeroed_pages; 479 extern struct evcnt pmap_evcnt_copied_pages; 480 extern struct evcnt pmap_evcnt_idlezeroed_pages; 481 482 #define PMAPCOUNT(ev) ((pmap_evcnt_ ## ev).ev_count++) 483 #define PMAPCOUNT2(ev) ((ev).ev_count++) 484 #else 485 #define PMAPCOUNT(ev) ((void) 0) 486 #define PMAPCOUNT2(ev) ((void) 0) 487 #endif 488 489 #define TLBIE(va) __asm volatile("tlbie %0" :: "r"(va)) 490 491 /* XXXSL: this needs to be moved to assembler */ 492 #define TLBIEL(va) __asm __volatile("tlbie %0" :: "r"(va)) 493 494 #ifdef MD_TLBSYNC 495 #define TLBSYNC() MD_TLBSYNC() 496 #else 497 #define TLBSYNC() __asm volatile("tlbsync") 498 #endif 499 #define SYNC() __asm volatile("sync") 500 #define EIEIO() __asm volatile("eieio") 501 #define DCBST(va) __asm __volatile("dcbst 0,%0" :: "r"(va)) 502 #define MFMSR() mfmsr() 503 #define MTMSR(psl) mtmsr(psl) 504 #define MFPVR() mfpvr() 505 #define MFSRIN(va) mfsrin(va) 506 #define MFTB() mfrtcltbl() 507 508 #if defined(DDB) && !defined(PMAP_OEA64) 509 static inline register_t 510 mfsrin(vaddr_t va) 511 { 512 register_t sr; 513 __asm volatile ("mfsrin %0,%1" : "=r"(sr) : "r"(va)); 514 return sr; 515 } 516 #endif /* DDB && !PMAP_OEA64 */ 517 518 #if defined (PMAP_OEA64_BRIDGE) 519 extern void mfmsr64 (register64_t *result); 520 #endif /* PMAP_OEA64_BRIDGE */ 521 522 #define PMAP_LOCK() KERNEL_LOCK(1, NULL) 523 #define PMAP_UNLOCK() KERNEL_UNLOCK_ONE(NULL) 524 525 static inline register_t 526 pmap_interrupts_off(void) 527 { 528 register_t msr = MFMSR(); 529 if (msr & PSL_EE) 530 MTMSR(msr & ~PSL_EE); 531 return msr; 532 } 533 534 static void 535 pmap_interrupts_restore(register_t msr) 536 { 537 if (msr & PSL_EE) 538 MTMSR(msr); 539 } 540 541 static inline u_int32_t 542 mfrtcltbl(void) 543 { 544 #ifdef PPC_OEA601 545 if ((MFPVR() >> 16) == MPC601) 546 return (mfrtcl() >> 7); 547 else 548 #endif 549 return (mftbl()); 550 } 551 552 /* 553 * These small routines may have to be replaced, 554 * if/when we support processors 
other than the 604.
 */

void
tlbia(void)
{
        char *i;

        SYNC();
#if defined(PMAP_OEA)
        /*
         * Why not use "tlbia"? Because not all processors implement it.
         *
         * This needs to be a per-CPU callback to do the appropriate thing
         * for the CPU. XXX
         */
        for (i = 0; i < (char *)0x00040000; i += 0x00001000) {
                TLBIE(i);
                EIEIO();
                SYNC();
        }
#elif defined (PMAP_OEA64) || defined (PMAP_OEA64_BRIDGE)
        /* This is specifically for the 970, 970UM v1.6 pp. 140. */
        for (i = 0; i <= (char *)0xFF000; i += 0x00001000) {
                TLBIEL(i);
                EIEIO();
                SYNC();
        }
#endif
        TLBSYNC();
        SYNC();
}

static inline register_t
va_to_vsid(const struct pmap *pm, vaddr_t addr)
{
#if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
        return (pm->pm_sr[addr >> ADDR_SR_SHFT] & SR_VSID) >> SR_VSID_SHFT;
#else /* PMAP_OEA64 */
#if 0
        const struct ste *ste;
        register_t hash;
        int i;

        hash = (addr >> ADDR_ESID_SHFT) & ADDR_ESID_HASH;

        /*
         * Try the primary group first
         */
        ste = pm->pm_stes[hash].stes;
        for (i = 0; i < 8; i++, ste++) {
                if ((ste->ste_hi & STE_V) &&
                    (addr & ~(ADDR_POFF|ADDR_PIDX)) == (ste->ste_hi & STE_ESID))
                        return ste;
        }

        /*
         * Then the secondary group.
         */
        ste = pm->pm_stes[hash ^ ADDR_ESID_HASH].stes;
        for (i = 0; i < 8; i++, ste++) {
                if ((ste->ste_hi & STE_V) &&
                    (addr & ~(ADDR_POFF|ADDR_PIDX)) == (ste->ste_hi & STE_ESID))
                        return addr;
        }

        return NULL;
#else
        /*
         * Rather than searching the STE groups for the VSID, we know
         * how we generate that from the ESID and so do that.
         */
        return VSID_MAKE(addr >> ADDR_SR_SHFT, pm->pm_vsid) >> SR_VSID_SHFT;
#endif
#endif /* PMAP_OEA */
}

static inline register_t
va_to_pteg(const struct pmap *pm, vaddr_t addr)
{
        register_t hash;

        hash = va_to_vsid(pm, addr) ^ ((addr & ADDR_PIDX) >> ADDR_PIDX_SHFT);
        return hash & pmap_pteg_mask;
}

#if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
/*
 * Given a PTE in the page table, calculate the VADDR that hashes to it.
 * The only bit of magic is that the top 4 bits of the address don't
 * technically exist in the PTE. But we know we reserved 4 bits of the
 * VSID for it so that's how we get it.
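 *
 * Concretely: ptegidx = (vsid ^ (va >> ADDR_PIDX_SHFT)) & pmap_pteg_mask,
 * and the PTEG's offset within pmap_pteg_table gives us ptegidx back, so
 * XORing that with the VSID field of pte_hi (after undoing the PTE_HID
 * complement for secondary-hash entries) recovers the middle VA bits; the
 * API field supplies the bits above those and VSID_TO_SR() the segment.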
646 */ 647 static vaddr_t 648 pmap_pte_to_va(volatile const struct pte *pt) 649 { 650 vaddr_t va; 651 uintptr_t ptaddr = (uintptr_t) pt; 652 653 if (pt->pte_hi & PTE_HID) 654 ptaddr ^= (pmap_pteg_mask * sizeof(struct pteg)); 655 656 /* PPC Bits 10-19 PPC64 Bits 42-51 */ 657 #if defined(PMAP_OEA) 658 va = ((pt->pte_hi >> PTE_VSID_SHFT) ^ (ptaddr / sizeof(struct pteg))) & 0x3ff; 659 #elif defined (PMAP_OEA64) || defined (PMAP_OEA64_BRIDGE) 660 va = ((pt->pte_hi >> PTE_VSID_SHFT) ^ (ptaddr / sizeof(struct pteg))) & 0x7ff; 661 #endif 662 va <<= ADDR_PIDX_SHFT; 663 664 /* PPC Bits 4-9 PPC64 Bits 36-41 */ 665 va |= (pt->pte_hi & PTE_API) << ADDR_API_SHFT; 666 667 #if defined(PMAP_OEA64) 668 /* PPC63 Bits 0-35 */ 669 /* va |= VSID_TO_SR(pt->pte_hi >> PTE_VSID_SHFT) << ADDR_SR_SHFT; */ 670 #elif defined(PMAP_OEA) || defined(PMAP_OEA64_BRIDGE) 671 /* PPC Bits 0-3 */ 672 va |= VSID_TO_SR(pt->pte_hi >> PTE_VSID_SHFT) << ADDR_SR_SHFT; 673 #endif 674 675 return va; 676 } 677 #endif 678 679 static inline struct pvo_head * 680 pa_to_pvoh(paddr_t pa, struct vm_page **pg_p) 681 { 682 struct vm_page *pg; 683 struct vm_page_md *md; 684 685 pg = PHYS_TO_VM_PAGE(pa); 686 if (pg_p != NULL) 687 *pg_p = pg; 688 if (pg == NULL) 689 return &pmap_pvo_unmanaged; 690 md = VM_PAGE_TO_MD(pg); 691 return &md->mdpg_pvoh; 692 } 693 694 static inline struct pvo_head * 695 vm_page_to_pvoh(struct vm_page *pg) 696 { 697 struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 698 699 return &md->mdpg_pvoh; 700 } 701 702 703 static inline void 704 pmap_attr_clear(struct vm_page *pg, int ptebit) 705 { 706 struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 707 708 md->mdpg_attrs &= ~ptebit; 709 } 710 711 static inline int 712 pmap_attr_fetch(struct vm_page *pg) 713 { 714 struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 715 716 return md->mdpg_attrs; 717 } 718 719 static inline void 720 pmap_attr_save(struct vm_page *pg, int ptebit) 721 { 722 struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 723 724 md->mdpg_attrs |= ptebit; 725 } 726 727 static inline int 728 pmap_pte_compare(const volatile struct pte *pt, const struct pte *pvo_pt) 729 { 730 if (pt->pte_hi == pvo_pt->pte_hi 731 #if 0 732 && ((pt->pte_lo ^ pvo_pt->pte_lo) & 733 ~(PTE_REF|PTE_CHG)) == 0 734 #endif 735 ) 736 return 1; 737 return 0; 738 } 739 740 static inline void 741 pmap_pte_create(struct pte *pt, const struct pmap *pm, vaddr_t va, register_t pte_lo) 742 { 743 /* 744 * Construct the PTE. Default to IMB initially. Valid bit 745 * only gets set when the real pte is set in memory. 746 * 747 * Note: Don't set the valid bit for correct operation of tlb update. 
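 *
 * Schematically (a sketch of the 32-bit OEA layout; the 64-bit variants
 * just widen the fields):
 *
 *	pte_hi = (vsid << PTE_VSID_SHFT) | API(va)	PTE_VALID is added
 *	pte_lo = (pa & PTE_RPGN) | WIMG | PP		later by pmap_pte_set()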
748 */ 749 #if defined(PMAP_OEA) 750 pt->pte_hi = (va_to_vsid(pm, va) << PTE_VSID_SHFT) 751 | (((va & ADDR_PIDX) >> (ADDR_API_SHFT - PTE_API_SHFT)) & PTE_API); 752 pt->pte_lo = pte_lo; 753 #elif defined (PMAP_OEA64_BRIDGE) || defined (PMAP_OEA64) 754 pt->pte_hi = ((u_int64_t)va_to_vsid(pm, va) << PTE_VSID_SHFT) 755 | (((va & ADDR_PIDX) >> (ADDR_API_SHFT - PTE_API_SHFT)) & PTE_API); 756 pt->pte_lo = (u_int64_t) pte_lo; 757 #endif /* PMAP_OEA */ 758 } 759 760 static inline void 761 pmap_pte_synch(volatile struct pte *pt, struct pte *pvo_pt) 762 { 763 pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF|PTE_CHG); 764 } 765 766 static inline void 767 pmap_pte_clear(volatile struct pte *pt, vaddr_t va, int ptebit) 768 { 769 /* 770 * As shown in Section 7.6.3.2.3 771 */ 772 pt->pte_lo &= ~ptebit; 773 TLBIE(va); 774 SYNC(); 775 EIEIO(); 776 TLBSYNC(); 777 SYNC(); 778 #ifdef MULTIPROCESSOR 779 DCBST(pt); 780 #endif 781 } 782 783 static inline void 784 pmap_pte_set(volatile struct pte *pt, struct pte *pvo_pt) 785 { 786 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 787 if (pvo_pt->pte_hi & PTE_VALID) 788 panic("pte_set: setting an already valid pte %p", pvo_pt); 789 #endif 790 pvo_pt->pte_hi |= PTE_VALID; 791 792 /* 793 * Update the PTE as defined in section 7.6.3.1 794 * Note that the REF/CHG bits are from pvo_pt and thus should 795 * have been saved so this routine can restore them (if desired). 796 */ 797 pt->pte_lo = pvo_pt->pte_lo; 798 EIEIO(); 799 pt->pte_hi = pvo_pt->pte_hi; 800 TLBSYNC(); 801 SYNC(); 802 #ifdef MULTIPROCESSOR 803 DCBST(pt); 804 #endif 805 pmap_pte_valid++; 806 } 807 808 static inline void 809 pmap_pte_unset(volatile struct pte *pt, struct pte *pvo_pt, vaddr_t va) 810 { 811 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 812 if ((pvo_pt->pte_hi & PTE_VALID) == 0) 813 panic("pte_unset: attempt to unset an inactive pte#1 %p/%p", pvo_pt, pt); 814 if ((pt->pte_hi & PTE_VALID) == 0) 815 panic("pte_unset: attempt to unset an inactive pte#2 %p/%p", pvo_pt, pt); 816 #endif 817 818 pvo_pt->pte_hi &= ~PTE_VALID; 819 /* 820 * Force the ref & chg bits back into the PTEs. 821 */ 822 SYNC(); 823 /* 824 * Invalidate the pte ... (Section 7.6.3.3) 825 */ 826 pt->pte_hi &= ~PTE_VALID; 827 SYNC(); 828 TLBIE(va); 829 SYNC(); 830 EIEIO(); 831 TLBSYNC(); 832 SYNC(); 833 /* 834 * Save the ref & chg bits ... 835 */ 836 pmap_pte_synch(pt, pvo_pt); 837 pmap_pte_valid--; 838 } 839 840 static inline void 841 pmap_pte_change(volatile struct pte *pt, struct pte *pvo_pt, vaddr_t va) 842 { 843 /* 844 * Invalidate the PTE 845 */ 846 pmap_pte_unset(pt, pvo_pt, va); 847 pmap_pte_set(pt, pvo_pt); 848 } 849 850 /* 851 * Try to insert the PTE @ *pvo_pt into the pmap_pteg_table at ptegidx 852 * (either primary or secondary location). 853 * 854 * Note: both the destination and source PTEs must not have PTE_VALID set. 855 */ 856 857 static int 858 pmap_pte_insert(int ptegidx, struct pte *pvo_pt) 859 { 860 volatile struct pte *pt; 861 int i; 862 863 #if defined(DEBUG) 864 DPRINTFN(PTE, "pmap_pte_insert: idx %#x, pte %#" _PRIxpte " %#" _PRIxpte "\n", 865 ptegidx, pvo_pt->pte_hi, pvo_pt->pte_lo); 866 #endif 867 /* 868 * First try primary hash. 869 */ 870 for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) { 871 if ((pt->pte_hi & PTE_VALID) == 0) { 872 pvo_pt->pte_hi &= ~PTE_HID; 873 pmap_pte_set(pt, pvo_pt); 874 return i; 875 } 876 } 877 878 /* 879 * Now try secondary hash. 
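 * The secondary PTEG lives at the complemented hash (ptegidx ^
 * pmap_pteg_mask, computed just below), and any PTE placed there must
 * carry PTE_HID so that both the MMU and pmap_pvo_pte_index() apply the
 * same complement when locating it again.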
880 */ 881 ptegidx ^= pmap_pteg_mask; 882 for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) { 883 if ((pt->pte_hi & PTE_VALID) == 0) { 884 pvo_pt->pte_hi |= PTE_HID; 885 pmap_pte_set(pt, pvo_pt); 886 return i; 887 } 888 } 889 return -1; 890 } 891 892 /* 893 * Spill handler. 894 * 895 * Tries to spill a page table entry from the overflow area. 896 * This runs in either real mode (if dealing with a exception spill) 897 * or virtual mode when dealing with manually spilling one of the 898 * kernel's pte entries. In either case, interrupts are already 899 * disabled. 900 */ 901 902 int 903 pmap_pte_spill(struct pmap *pm, vaddr_t addr, bool exec) 904 { 905 struct pvo_entry *source_pvo, *victim_pvo, *next_pvo; 906 struct pvo_entry *pvo; 907 /* XXX: gcc -- vpvoh is always set at either *1* or *2* */ 908 struct pvo_tqhead *pvoh, *vpvoh = NULL; 909 int ptegidx, i, j; 910 volatile struct pteg *pteg; 911 volatile struct pte *pt; 912 913 PMAP_LOCK(); 914 915 ptegidx = va_to_pteg(pm, addr); 916 917 /* 918 * Have to substitute some entry. Use the primary hash for this. 919 * Use low bits of timebase as random generator. Make sure we are 920 * not picking a kernel pte for replacement. 921 */ 922 pteg = &pmap_pteg_table[ptegidx]; 923 i = MFTB() & 7; 924 for (j = 0; j < 8; j++) { 925 pt = &pteg->pt[i]; 926 if ((pt->pte_hi & PTE_VALID) == 0) 927 break; 928 if (VSID_TO_HASH((pt->pte_hi & PTE_VSID) >> PTE_VSID_SHFT) 929 < PHYSMAP_VSIDBITS) 930 break; 931 i = (i + 1) & 7; 932 } 933 KASSERT(j < 8); 934 935 source_pvo = NULL; 936 victim_pvo = NULL; 937 pvoh = &pmap_pvo_table[ptegidx]; 938 TAILQ_FOREACH(pvo, pvoh, pvo_olink) { 939 940 /* 941 * We need to find pvo entry for this address... 942 */ 943 PMAP_PVO_CHECK(pvo); /* sanity check */ 944 945 /* 946 * If we haven't found the source and we come to a PVO with 947 * a valid PTE, then we know we can't find it because all 948 * evicted PVOs always are first in the list. 949 */ 950 if (source_pvo == NULL && (pvo->pvo_pte.pte_hi & PTE_VALID)) 951 break; 952 if (source_pvo == NULL && pm == pvo->pvo_pmap && 953 addr == PVO_VADDR(pvo)) { 954 955 /* 956 * Now we have found the entry to be spilled into the 957 * pteg. Attempt to insert it into the page table. 958 */ 959 j = pmap_pte_insert(ptegidx, &pvo->pvo_pte); 960 if (j >= 0) { 961 PVO_PTEGIDX_SET(pvo, j); 962 PMAP_PVO_CHECK(pvo); /* sanity check */ 963 PVO_WHERE(pvo, SPILL_INSERT); 964 pvo->pvo_pmap->pm_evictions--; 965 PMAPCOUNT(ptes_spilled); 966 PMAPCOUNT2(((pvo->pvo_pte.pte_hi & PTE_HID) 967 ? pmap_evcnt_ptes_secondary 968 : pmap_evcnt_ptes_primary)[j]); 969 970 /* 971 * Since we keep the evicted entries at the 972 * from of the PVO list, we need move this 973 * (now resident) PVO after the evicted 974 * entries. 975 */ 976 next_pvo = TAILQ_NEXT(pvo, pvo_olink); 977 978 /* 979 * If we don't have to move (either we were the 980 * last entry or the next entry was valid), 981 * don't change our position. Otherwise 982 * move ourselves to the tail of the queue. 983 */ 984 if (next_pvo != NULL && 985 !(next_pvo->pvo_pte.pte_hi & PTE_VALID)) { 986 TAILQ_REMOVE(pvoh, pvo, pvo_olink); 987 TAILQ_INSERT_TAIL(pvoh, pvo, pvo_olink); 988 } 989 PMAP_UNLOCK(); 990 return 1; 991 } 992 source_pvo = pvo; 993 if (exec && !PVO_EXECUTABLE_P(source_pvo)) { 994 return 0; 995 } 996 if (victim_pvo != NULL) 997 break; 998 } 999 1000 /* 1001 * We also need the pvo entry of the victim we are replacing 1002 * so save the R & C bits of the PTE. 
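 *
 * (Only a primary-hash victim can match here: PVOs hang off the bucket of
 * their primary hash, so a slot with PTE_HID set belongs to the
 * complemented bucket and is handled by the search further below.)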
1003 */ 1004 if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL && 1005 pmap_pte_compare(pt, &pvo->pvo_pte)) { 1006 vpvoh = pvoh; /* *1* */ 1007 victim_pvo = pvo; 1008 if (source_pvo != NULL) 1009 break; 1010 } 1011 } 1012 1013 if (source_pvo == NULL) { 1014 PMAPCOUNT(ptes_unspilled); 1015 PMAP_UNLOCK(); 1016 return 0; 1017 } 1018 1019 if (victim_pvo == NULL) { 1020 if ((pt->pte_hi & PTE_HID) == 0) 1021 panic("pmap_pte_spill: victim p-pte (%p) has " 1022 "no pvo entry!", pt); 1023 1024 /* 1025 * If this is a secondary PTE, we need to search 1026 * its primary pvo bucket for the matching PVO. 1027 */ 1028 vpvoh = &pmap_pvo_table[ptegidx ^ pmap_pteg_mask]; /* *2* */ 1029 TAILQ_FOREACH(pvo, vpvoh, pvo_olink) { 1030 PMAP_PVO_CHECK(pvo); /* sanity check */ 1031 1032 /* 1033 * We also need the pvo entry of the victim we are 1034 * replacing so save the R & C bits of the PTE. 1035 */ 1036 if (pmap_pte_compare(pt, &pvo->pvo_pte)) { 1037 victim_pvo = pvo; 1038 break; 1039 } 1040 } 1041 if (victim_pvo == NULL) 1042 panic("pmap_pte_spill: victim s-pte (%p) has " 1043 "no pvo entry!", pt); 1044 } 1045 1046 /* 1047 * The victim should be not be a kernel PVO/PTE entry. 1048 */ 1049 KASSERT(victim_pvo->pvo_pmap != pmap_kernel()); 1050 KASSERT(PVO_PTEGIDX_ISSET(victim_pvo)); 1051 KASSERT(PVO_PTEGIDX_GET(victim_pvo) == i); 1052 1053 /* 1054 * We are invalidating the TLB entry for the EA for the 1055 * we are replacing even though its valid; If we don't 1056 * we lose any ref/chg bit changes contained in the TLB 1057 * entry. 1058 */ 1059 source_pvo->pvo_pte.pte_hi &= ~PTE_HID; 1060 1061 /* 1062 * To enforce the PVO list ordering constraint that all 1063 * evicted entries should come before all valid entries, 1064 * move the source PVO to the tail of its list and the 1065 * victim PVO to the head of its list (which might not be 1066 * the same list, if the victim was using the secondary hash). 1067 */ 1068 TAILQ_REMOVE(pvoh, source_pvo, pvo_olink); 1069 TAILQ_INSERT_TAIL(pvoh, source_pvo, pvo_olink); 1070 TAILQ_REMOVE(vpvoh, victim_pvo, pvo_olink); 1071 TAILQ_INSERT_HEAD(vpvoh, victim_pvo, pvo_olink); 1072 pmap_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr); 1073 pmap_pte_set(pt, &source_pvo->pvo_pte); 1074 victim_pvo->pvo_pmap->pm_evictions++; 1075 source_pvo->pvo_pmap->pm_evictions--; 1076 PVO_WHERE(victim_pvo, SPILL_UNSET); 1077 PVO_WHERE(source_pvo, SPILL_SET); 1078 1079 PVO_PTEGIDX_CLR(victim_pvo); 1080 PVO_PTEGIDX_SET(source_pvo, i); 1081 PMAPCOUNT2(pmap_evcnt_ptes_primary[i]); 1082 PMAPCOUNT(ptes_spilled); 1083 PMAPCOUNT(ptes_evicted); 1084 PMAPCOUNT(ptes_removed); 1085 1086 PMAP_PVO_CHECK(victim_pvo); 1087 PMAP_PVO_CHECK(source_pvo); 1088 1089 PMAP_UNLOCK(); 1090 return 1; 1091 } 1092 1093 /* 1094 * Restrict given range to physical memory 1095 */ 1096 void 1097 pmap_real_memory(paddr_t *start, psize_t *size) 1098 { 1099 struct mem_region *mp; 1100 1101 for (mp = mem; mp->size; mp++) { 1102 if (*start + *size > mp->start 1103 && *start < mp->start + mp->size) { 1104 if (*start < mp->start) { 1105 *size -= mp->start - *start; 1106 *start = mp->start; 1107 } 1108 if (*start + *size > mp->start + mp->size) 1109 *size = mp->start + mp->size - *start; 1110 return; 1111 } 1112 } 1113 *size = 0; 1114 } 1115 1116 /* 1117 * Initialize anything else for pmap handling. 1118 * Called during vm_init(). 
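 *
 * Only the managed-PVO pool is created here (the other pools are needed
 * before uvm is up and are set up at bootstrap time); the low-water mark
 * keeps a reserve of entries around because pmap_pvo_enter() allocates
 * with PR_NOWAIT.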
1119 */ 1120 void 1121 pmap_init(void) 1122 { 1123 pool_init(&pmap_mpvo_pool, sizeof(struct pvo_entry), 1124 sizeof(struct pvo_entry), 0, 0, "pmap_mpvopl", 1125 &pmap_pool_mallocator, IPL_NONE); 1126 1127 pool_setlowat(&pmap_mpvo_pool, 1008); 1128 1129 pmap_initialized = 1; 1130 1131 } 1132 1133 /* 1134 * How much virtual space does the kernel get? 1135 */ 1136 void 1137 pmap_virtual_space(vaddr_t *start, vaddr_t *end) 1138 { 1139 /* 1140 * For now, reserve one segment (minus some overhead) for kernel 1141 * virtual memory 1142 */ 1143 *start = VM_MIN_KERNEL_ADDRESS; 1144 *end = VM_MAX_KERNEL_ADDRESS; 1145 } 1146 1147 /* 1148 * Allocate, initialize, and return a new physical map. 1149 */ 1150 pmap_t 1151 pmap_create(void) 1152 { 1153 pmap_t pm; 1154 1155 pm = pool_get(&pmap_pool, PR_WAITOK); 1156 KASSERT((vaddr_t)pm < VM_MIN_KERNEL_ADDRESS); 1157 memset((void *)pm, 0, sizeof *pm); 1158 pmap_pinit(pm); 1159 1160 DPRINTFN(CREATE, "pmap_create: pm %p:\n" 1161 "\t%#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr 1162 " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr "\n" 1163 "\t%#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr 1164 " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr "\n", 1165 pm, 1166 pm->pm_sr[0], pm->pm_sr[1], 1167 pm->pm_sr[2], pm->pm_sr[3], 1168 pm->pm_sr[4], pm->pm_sr[5], 1169 pm->pm_sr[6], pm->pm_sr[7], 1170 pm->pm_sr[8], pm->pm_sr[9], 1171 pm->pm_sr[10], pm->pm_sr[11], 1172 pm->pm_sr[12], pm->pm_sr[13], 1173 pm->pm_sr[14], pm->pm_sr[15]); 1174 return pm; 1175 } 1176 1177 /* 1178 * Initialize a preallocated and zeroed pmap structure. 1179 */ 1180 void 1181 pmap_pinit(pmap_t pm) 1182 { 1183 register_t entropy = MFTB(); 1184 register_t mask; 1185 int i; 1186 1187 /* 1188 * Allocate some segment registers for this pmap. 1189 */ 1190 pm->pm_refs = 1; 1191 PMAP_LOCK(); 1192 for (i = 0; i < NPMAPS; i += VSID_NBPW) { 1193 static register_t pmap_vsidcontext; 1194 register_t hash; 1195 unsigned int n; 1196 1197 /* Create a new value by multiplying by a prime adding in 1198 * entropy from the timebase register. This is to make the 1199 * VSID more random so that the PT Hash function collides 1200 * less often. (note that the prime causes gcc to do shifts 1201 * instead of a multiply) 1202 */ 1203 pmap_vsidcontext = (pmap_vsidcontext * 0x1105) + entropy; 1204 hash = pmap_vsidcontext & (NPMAPS - 1); 1205 if (hash == 0) { /* 0 is special, avoid it */ 1206 entropy += 0xbadf00d; 1207 continue; 1208 } 1209 n = hash >> 5; 1210 mask = 1L << (hash & (VSID_NBPW-1)); 1211 hash = pmap_vsidcontext; 1212 if (pmap_vsid_bitmap[n] & mask) { /* collision? */ 1213 /* anything free in this bucket? */ 1214 if (~pmap_vsid_bitmap[n] == 0) { 1215 entropy = hash ^ (hash >> 16); 1216 continue; 1217 } 1218 i = ffs(~pmap_vsid_bitmap[n]) - 1; 1219 mask = 1L << i; 1220 hash &= ~(VSID_NBPW-1); 1221 hash |= i; 1222 } 1223 hash &= PTE_VSID >> PTE_VSID_SHFT; 1224 pmap_vsid_bitmap[n] |= mask; 1225 pm->pm_vsid = hash; 1226 #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE) 1227 for (i = 0; i < 16; i++) 1228 pm->pm_sr[i] = VSID_MAKE(i, hash) | SR_PRKEY | 1229 SR_NOEXEC; 1230 #endif 1231 PMAP_UNLOCK(); 1232 return; 1233 } 1234 PMAP_UNLOCK(); 1235 panic("pmap_pinit: out of segments"); 1236 } 1237 1238 /* 1239 * Add a reference to the given pmap. 1240 */ 1241 void 1242 pmap_reference(pmap_t pm) 1243 { 1244 atomic_inc_uint(&pm->pm_refs); 1245 } 1246 1247 /* 1248 * Retire the given pmap from service. 1249 * Should only be called if the map contains no valid mappings. 
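 *
 * Illustrative only -- the reference-counted life cycle these routines
 * implement:
 *
 *	pmap_t pm = pmap_create();	pm_refs == 1
 *	pmap_reference(pm);		pm_refs == 2
 *	pmap_destroy(pm);		pm_refs == 1, pmap kept
 *	pmap_destroy(pm);		pm_refs == 0, pmap_release() + pool_put()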
1250 */ 1251 void 1252 pmap_destroy(pmap_t pm) 1253 { 1254 if (atomic_dec_uint_nv(&pm->pm_refs) == 0) { 1255 pmap_release(pm); 1256 pool_put(&pmap_pool, pm); 1257 } 1258 } 1259 1260 /* 1261 * Release any resources held by the given physical map. 1262 * Called when a pmap initialized by pmap_pinit is being released. 1263 */ 1264 void 1265 pmap_release(pmap_t pm) 1266 { 1267 int idx, mask; 1268 1269 KASSERT(pm->pm_stats.resident_count == 0); 1270 KASSERT(pm->pm_stats.wired_count == 0); 1271 1272 PMAP_LOCK(); 1273 if (pm->pm_sr[0] == 0) 1274 panic("pmap_release"); 1275 idx = pm->pm_vsid & (NPMAPS-1); 1276 mask = 1 << (idx % VSID_NBPW); 1277 idx /= VSID_NBPW; 1278 1279 KASSERT(pmap_vsid_bitmap[idx] & mask); 1280 pmap_vsid_bitmap[idx] &= ~mask; 1281 PMAP_UNLOCK(); 1282 } 1283 1284 /* 1285 * Copy the range specified by src_addr/len 1286 * from the source map to the range dst_addr/len 1287 * in the destination map. 1288 * 1289 * This routine is only advisory and need not do anything. 1290 */ 1291 void 1292 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr, 1293 vsize_t len, vaddr_t src_addr) 1294 { 1295 PMAPCOUNT(copies); 1296 } 1297 1298 /* 1299 * Require that all active physical maps contain no 1300 * incorrect entries NOW. 1301 */ 1302 void 1303 pmap_update(struct pmap *pmap) 1304 { 1305 PMAPCOUNT(updates); 1306 TLBSYNC(); 1307 } 1308 1309 static inline int 1310 pmap_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx) 1311 { 1312 int pteidx; 1313 /* 1314 * We can find the actual pte entry without searching by 1315 * grabbing the PTEG index from 3 unused bits in pte_lo[11:9] 1316 * and by noticing the HID bit. 1317 */ 1318 pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo); 1319 if (pvo->pvo_pte.pte_hi & PTE_HID) 1320 pteidx ^= pmap_pteg_mask * 8; 1321 return pteidx; 1322 } 1323 1324 volatile struct pte * 1325 pmap_pvo_to_pte(const struct pvo_entry *pvo, int pteidx) 1326 { 1327 volatile struct pte *pt; 1328 1329 #if !defined(DIAGNOSTIC) && !defined(DEBUG) && !defined(PMAPCHECK) 1330 if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) 1331 return NULL; 1332 #endif 1333 1334 /* 1335 * If we haven't been supplied the ptegidx, calculate it. 
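 *
 * pteidx encodes both the PTEG number (pteidx >> 3) and the slot within
 * it (pteidx & 7); PVO_PTEGIDX_GET() remembers the slot and PTE_HID says
 * whether the complemented group was used, which is exactly what
 * pmap_pvo_pte_index() reconstructs.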
1336 */ 1337 if (pteidx == -1) { 1338 int ptegidx; 1339 ptegidx = va_to_pteg(pvo->pvo_pmap, pvo->pvo_vaddr); 1340 pteidx = pmap_pvo_pte_index(pvo, ptegidx); 1341 } 1342 1343 pt = &pmap_pteg_table[pteidx >> 3].pt[pteidx & 7]; 1344 1345 #if !defined(DIAGNOSTIC) && !defined(DEBUG) && !defined(PMAPCHECK) 1346 return pt; 1347 #else 1348 if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) { 1349 panic("pmap_pvo_to_pte: pvo %p: has valid pte in " 1350 "pvo but no valid pte index", pvo); 1351 } 1352 if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) { 1353 panic("pmap_pvo_to_pte: pvo %p: has valid pte index in " 1354 "pvo but no valid pte", pvo); 1355 } 1356 1357 if ((pt->pte_hi ^ (pvo->pvo_pte.pte_hi & ~PTE_VALID)) == PTE_VALID) { 1358 if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) { 1359 #if defined(DEBUG) || defined(PMAPCHECK) 1360 pmap_pte_print(pt); 1361 #endif 1362 panic("pmap_pvo_to_pte: pvo %p: has valid pte in " 1363 "pmap_pteg_table %p but invalid in pvo", 1364 pvo, pt); 1365 } 1366 if (((pt->pte_lo ^ pvo->pvo_pte.pte_lo) & ~(PTE_CHG|PTE_REF)) != 0) { 1367 #if defined(DEBUG) || defined(PMAPCHECK) 1368 pmap_pte_print(pt); 1369 #endif 1370 panic("pmap_pvo_to_pte: pvo %p: pvo pte does " 1371 "not match pte %p in pmap_pteg_table", 1372 pvo, pt); 1373 } 1374 return pt; 1375 } 1376 1377 if (pvo->pvo_pte.pte_hi & PTE_VALID) { 1378 #if defined(DEBUG) || defined(PMAPCHECK) 1379 pmap_pte_print(pt); 1380 #endif 1381 panic("pmap_pvo_to_pte: pvo %p: has nomatching pte %p in " 1382 "pmap_pteg_table but valid in pvo", pvo, pt); 1383 } 1384 return NULL; 1385 #endif /* !(!DIAGNOSTIC && !DEBUG && !PMAPCHECK) */ 1386 } 1387 1388 struct pvo_entry * 1389 pmap_pvo_find_va(pmap_t pm, vaddr_t va, int *pteidx_p) 1390 { 1391 struct pvo_entry *pvo; 1392 int ptegidx; 1393 1394 va &= ~ADDR_POFF; 1395 ptegidx = va_to_pteg(pm, va); 1396 1397 TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { 1398 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 1399 if ((uintptr_t) pvo >= SEGMENT_LENGTH) 1400 panic("pmap_pvo_find_va: invalid pvo %p on " 1401 "list %#x (%p)", pvo, ptegidx, 1402 &pmap_pvo_table[ptegidx]); 1403 #endif 1404 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { 1405 if (pteidx_p) 1406 *pteidx_p = pmap_pvo_pte_index(pvo, ptegidx); 1407 return pvo; 1408 } 1409 } 1410 if ((pm == pmap_kernel()) && (va < SEGMENT_LENGTH)) 1411 panic("%s: returning NULL for %s pmap, va: %#" _PRIxva "\n", 1412 __func__, (pm == pmap_kernel() ? 
"kernel" : "user"), va); 1413 return NULL; 1414 } 1415 1416 #if defined(DEBUG) || defined(PMAPCHECK) 1417 void 1418 pmap_pvo_check(const struct pvo_entry *pvo) 1419 { 1420 struct pvo_head *pvo_head; 1421 struct pvo_entry *pvo0; 1422 volatile struct pte *pt; 1423 int failed = 0; 1424 1425 PMAP_LOCK(); 1426 1427 if ((uintptr_t)(pvo+1) >= SEGMENT_LENGTH) 1428 panic("pmap_pvo_check: pvo %p: invalid address", pvo); 1429 1430 if ((uintptr_t)(pvo->pvo_pmap+1) >= SEGMENT_LENGTH) { 1431 printf("pmap_pvo_check: pvo %p: invalid pmap address %p\n", 1432 pvo, pvo->pvo_pmap); 1433 failed = 1; 1434 } 1435 1436 if ((uintptr_t)TAILQ_NEXT(pvo, pvo_olink) >= SEGMENT_LENGTH || 1437 (((uintptr_t)TAILQ_NEXT(pvo, pvo_olink)) & 0x1f) != 0) { 1438 printf("pmap_pvo_check: pvo %p: invalid ovlink address %p\n", 1439 pvo, TAILQ_NEXT(pvo, pvo_olink)); 1440 failed = 1; 1441 } 1442 1443 if ((uintptr_t)LIST_NEXT(pvo, pvo_vlink) >= SEGMENT_LENGTH || 1444 (((uintptr_t)LIST_NEXT(pvo, pvo_vlink)) & 0x1f) != 0) { 1445 printf("pmap_pvo_check: pvo %p: invalid ovlink address %p\n", 1446 pvo, LIST_NEXT(pvo, pvo_vlink)); 1447 failed = 1; 1448 } 1449 1450 if (PVO_MANAGED_P(pvo)) { 1451 pvo_head = pa_to_pvoh(pvo->pvo_pte.pte_lo & PTE_RPGN, NULL); 1452 } else { 1453 if (pvo->pvo_vaddr < VM_MIN_KERNEL_ADDRESS) { 1454 printf("pmap_pvo_check: pvo %p: non kernel address " 1455 "on kernel unmanaged list\n", pvo); 1456 failed = 1; 1457 } 1458 pvo_head = &pmap_pvo_kunmanaged; 1459 } 1460 LIST_FOREACH(pvo0, pvo_head, pvo_vlink) { 1461 if (pvo0 == pvo) 1462 break; 1463 } 1464 if (pvo0 == NULL) { 1465 printf("pmap_pvo_check: pvo %p: not present " 1466 "on its vlist head %p\n", pvo, pvo_head); 1467 failed = 1; 1468 } 1469 if (pvo != pmap_pvo_find_va(pvo->pvo_pmap, pvo->pvo_vaddr, NULL)) { 1470 printf("pmap_pvo_check: pvo %p: not present " 1471 "on its olist head\n", pvo); 1472 failed = 1; 1473 } 1474 pt = pmap_pvo_to_pte(pvo, -1); 1475 if (pt == NULL) { 1476 if (pvo->pvo_pte.pte_hi & PTE_VALID) { 1477 printf("pmap_pvo_check: pvo %p: pte_hi VALID but " 1478 "no PTE\n", pvo); 1479 failed = 1; 1480 } 1481 } else { 1482 if ((uintptr_t) pt < (uintptr_t) &pmap_pteg_table[0] || 1483 (uintptr_t) pt >= 1484 (uintptr_t) &pmap_pteg_table[pmap_pteg_cnt]) { 1485 printf("pmap_pvo_check: pvo %p: pte %p not in " 1486 "pteg table\n", pvo, pt); 1487 failed = 1; 1488 } 1489 if (((((uintptr_t) pt) >> 3) & 7) != PVO_PTEGIDX_GET(pvo)) { 1490 printf("pmap_pvo_check: pvo %p: pte_hi VALID but " 1491 "no PTE\n", pvo); 1492 failed = 1; 1493 } 1494 if (pvo->pvo_pte.pte_hi != pt->pte_hi) { 1495 printf("pmap_pvo_check: pvo %p: pte_hi differ: " 1496 "%#" _PRIxpte "/%#" _PRIxpte "\n", pvo, 1497 pvo->pvo_pte.pte_hi, 1498 pt->pte_hi); 1499 failed = 1; 1500 } 1501 if (((pvo->pvo_pte.pte_lo ^ pt->pte_lo) & 1502 (PTE_PP|PTE_WIMG|PTE_RPGN)) != 0) { 1503 printf("pmap_pvo_check: pvo %p: pte_lo differ: " 1504 "%#" _PRIxpte "/%#" _PRIxpte "\n", pvo, 1505 (pvo->pvo_pte.pte_lo & (PTE_PP|PTE_WIMG|PTE_RPGN)), 1506 (pt->pte_lo & (PTE_PP|PTE_WIMG|PTE_RPGN))); 1507 failed = 1; 1508 } 1509 if ((pmap_pte_to_va(pt) ^ PVO_VADDR(pvo)) & 0x0fffffff) { 1510 printf("pmap_pvo_check: pvo %p: PTE %p derived VA %#" _PRIxva "" 1511 " doesn't not match PVO's VA %#" _PRIxva "\n", 1512 pvo, pt, pmap_pte_to_va(pt), PVO_VADDR(pvo)); 1513 failed = 1; 1514 } 1515 if (failed) 1516 pmap_pte_print(pt); 1517 } 1518 if (failed) 1519 panic("pmap_pvo_check: pvo %p, pm %p: bugcheck!", pvo, 1520 pvo->pvo_pmap); 1521 1522 PMAP_UNLOCK(); 1523 } 1524 #endif /* DEBUG || PMAPCHECK */ 1525 1526 /* 1527 * Search the PVO table 
looking for a non-wired entry. 1528 * If we find one, remove it and return it. 1529 */ 1530 1531 struct pvo_entry * 1532 pmap_pvo_reclaim(struct pmap *pm) 1533 { 1534 struct pvo_tqhead *pvoh; 1535 struct pvo_entry *pvo; 1536 uint32_t idx, endidx; 1537 1538 endidx = pmap_pvo_reclaim_nextidx; 1539 for (idx = (endidx + 1) & pmap_pteg_mask; idx != endidx; 1540 idx = (idx + 1) & pmap_pteg_mask) { 1541 pvoh = &pmap_pvo_table[idx]; 1542 TAILQ_FOREACH(pvo, pvoh, pvo_olink) { 1543 if (!PVO_WIRED_P(pvo)) { 1544 pmap_pvo_remove(pvo, -1, NULL); 1545 pmap_pvo_reclaim_nextidx = idx; 1546 PMAPCOUNT(pvos_reclaimed); 1547 return pvo; 1548 } 1549 } 1550 } 1551 return NULL; 1552 } 1553 1554 static struct pool * 1555 pmap_pvo_pl(struct pvo_entry *pvo) 1556 { 1557 1558 return PVO_MANAGED_P(pvo) ? &pmap_mpvo_pool : &pmap_upvo_pool; 1559 } 1560 1561 /* 1562 * This returns whether this is the first mapping of a page. 1563 */ 1564 int 1565 pmap_pvo_enter(pmap_t pm, struct pool *pl, struct pvo_head *pvo_head, 1566 vaddr_t va, paddr_t pa, register_t pte_lo, int flags) 1567 { 1568 struct pvo_entry *pvo; 1569 struct pvo_tqhead *pvoh; 1570 register_t msr; 1571 int ptegidx; 1572 int i; 1573 int poolflags = PR_NOWAIT; 1574 1575 /* 1576 * Compute the PTE Group index. 1577 */ 1578 va &= ~ADDR_POFF; 1579 ptegidx = va_to_pteg(pm, va); 1580 1581 msr = pmap_interrupts_off(); 1582 1583 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 1584 if (pmap_pvo_remove_depth > 0) 1585 panic("pmap_pvo_enter: called while pmap_pvo_remove active!"); 1586 if (++pmap_pvo_enter_depth > 1) 1587 panic("pmap_pvo_enter: called recursively!"); 1588 #endif 1589 1590 /* 1591 * Remove any existing mapping for this page. Reuse the 1592 * pvo entry if there a mapping. 1593 */ 1594 TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { 1595 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { 1596 #ifdef DEBUG 1597 if ((pmapdebug & PMAPDEBUG_PVOENTER) && 1598 ((pvo->pvo_pte.pte_lo ^ (pa|pte_lo)) & 1599 ~(PTE_REF|PTE_CHG)) == 0 && 1600 va < VM_MIN_KERNEL_ADDRESS) { 1601 printf("pmap_pvo_enter: pvo %p: dup %#" _PRIxpte "/%#" _PRIxpa "\n", 1602 pvo, pvo->pvo_pte.pte_lo, pte_lo|pa); 1603 printf("pmap_pvo_enter: pte_hi=%#" _PRIxpte " sr=%#" _PRIsr "\n", 1604 pvo->pvo_pte.pte_hi, 1605 pm->pm_sr[va >> ADDR_SR_SHFT]); 1606 pmap_pte_print(pmap_pvo_to_pte(pvo, -1)); 1607 #ifdef DDBX 1608 Debugger(); 1609 #endif 1610 } 1611 #endif 1612 PMAPCOUNT(mappings_replaced); 1613 pmap_pvo_remove(pvo, -1, NULL); 1614 break; 1615 } 1616 } 1617 1618 /* 1619 * If we aren't overwriting an mapping, try to allocate 1620 */ 1621 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 1622 --pmap_pvo_enter_depth; 1623 #endif 1624 pmap_interrupts_restore(msr); 1625 if (pvo) { 1626 KASSERT(pmap_pvo_pl(pvo) == pl); 1627 } else { 1628 pvo = pool_get(pl, poolflags); 1629 } 1630 KASSERT((vaddr_t)pvo < VM_MIN_KERNEL_ADDRESS); 1631 1632 #ifdef DEBUG 1633 /* 1634 * Exercise pmap_pvo_reclaim() a little. 
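 *
 * When PMAP_CANFAIL allows it, every so often the freshly allocated pvo
 * is deliberately thrown away (below) so that the reclaim path gets
 * exercised without having to wait for real memory pressure.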
1635 */ 1636 if (pvo && (flags & PMAP_CANFAIL) != 0 && 1637 pmap_pvo_reclaim_debugctr++ > 0x1000 && 1638 (pmap_pvo_reclaim_debugctr & 0xff) == 0) { 1639 pool_put(pl, pvo); 1640 pvo = NULL; 1641 } 1642 #endif 1643 1644 msr = pmap_interrupts_off(); 1645 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 1646 ++pmap_pvo_enter_depth; 1647 #endif 1648 if (pvo == NULL) { 1649 pvo = pmap_pvo_reclaim(pm); 1650 if (pvo == NULL) { 1651 if ((flags & PMAP_CANFAIL) == 0) 1652 panic("pmap_pvo_enter: failed"); 1653 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 1654 pmap_pvo_enter_depth--; 1655 #endif 1656 PMAPCOUNT(pvos_failed); 1657 pmap_interrupts_restore(msr); 1658 return ENOMEM; 1659 } 1660 } 1661 1662 pvo->pvo_vaddr = va; 1663 pvo->pvo_pmap = pm; 1664 pvo->pvo_vaddr &= ~ADDR_POFF; 1665 if (flags & VM_PROT_EXECUTE) { 1666 PMAPCOUNT(exec_mappings); 1667 pvo_set_exec(pvo); 1668 } 1669 if (flags & PMAP_WIRED) 1670 pvo->pvo_vaddr |= PVO_WIRED; 1671 if (pvo_head != &pmap_pvo_kunmanaged) { 1672 pvo->pvo_vaddr |= PVO_MANAGED; 1673 PMAPCOUNT(mappings); 1674 } else { 1675 PMAPCOUNT(kernel_mappings); 1676 } 1677 pmap_pte_create(&pvo->pvo_pte, pm, va, pa | pte_lo); 1678 1679 LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink); 1680 if (PVO_WIRED_P(pvo)) 1681 pvo->pvo_pmap->pm_stats.wired_count++; 1682 pvo->pvo_pmap->pm_stats.resident_count++; 1683 #if defined(DEBUG) 1684 /* if (pm != pmap_kernel() && va < VM_MIN_KERNEL_ADDRESS) */ 1685 DPRINTFN(PVOENTER, 1686 "pmap_pvo_enter: pvo %p: pm %p va %#" _PRIxva " pa %#" _PRIxpa "\n", 1687 pvo, pm, va, pa); 1688 #endif 1689 1690 /* 1691 * We hope this succeeds but it isn't required. 1692 */ 1693 pvoh = &pmap_pvo_table[ptegidx]; 1694 i = pmap_pte_insert(ptegidx, &pvo->pvo_pte); 1695 if (i >= 0) { 1696 PVO_PTEGIDX_SET(pvo, i); 1697 PVO_WHERE(pvo, ENTER_INSERT); 1698 PMAPCOUNT2(((pvo->pvo_pte.pte_hi & PTE_HID) 1699 ? pmap_evcnt_ptes_secondary : pmap_evcnt_ptes_primary)[i]); 1700 TAILQ_INSERT_TAIL(pvoh, pvo, pvo_olink); 1701 1702 } else { 1703 /* 1704 * Since we didn't have room for this entry (which makes it 1705 * and evicted entry), place it at the head of the list. 1706 */ 1707 TAILQ_INSERT_HEAD(pvoh, pvo, pvo_olink); 1708 PMAPCOUNT(ptes_evicted); 1709 pm->pm_evictions++; 1710 /* 1711 * If this is a kernel page, make sure it's active. 1712 */ 1713 if (pm == pmap_kernel()) { 1714 i = pmap_pte_spill(pm, va, false); 1715 KASSERT(i); 1716 } 1717 } 1718 PMAP_PVO_CHECK(pvo); /* sanity check */ 1719 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 1720 pmap_pvo_enter_depth--; 1721 #endif 1722 pmap_interrupts_restore(msr); 1723 return 0; 1724 } 1725 1726 static void 1727 pmap_pvo_remove(struct pvo_entry *pvo, int pteidx, struct pvo_head *pvol) 1728 { 1729 volatile struct pte *pt; 1730 int ptegidx; 1731 1732 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 1733 if (++pmap_pvo_remove_depth > 1) 1734 panic("pmap_pvo_remove: called recursively!"); 1735 #endif 1736 1737 /* 1738 * If we haven't been supplied the ptegidx, calculate it. 1739 */ 1740 if (pteidx == -1) { 1741 ptegidx = va_to_pteg(pvo->pvo_pmap, pvo->pvo_vaddr); 1742 pteidx = pmap_pvo_pte_index(pvo, ptegidx); 1743 } else { 1744 ptegidx = pteidx >> 3; 1745 if (pvo->pvo_pte.pte_hi & PTE_HID) 1746 ptegidx ^= pmap_pteg_mask; 1747 } 1748 PMAP_PVO_CHECK(pvo); /* sanity check */ 1749 1750 /* 1751 * If there is an active pte entry, we need to deactivate it 1752 * (and save the ref & chg bits). 
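 *
 * pmap_pte_unset() copies the hardware REF/CHG bits back into pvo_pte
 * before invalidating the slot, so the attribute-cache update further
 * down sees the final state of the page.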
1753 */ 1754 pt = pmap_pvo_to_pte(pvo, pteidx); 1755 if (pt != NULL) { 1756 pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 1757 PVO_WHERE(pvo, REMOVE); 1758 PVO_PTEGIDX_CLR(pvo); 1759 PMAPCOUNT(ptes_removed); 1760 } else { 1761 KASSERT(pvo->pvo_pmap->pm_evictions > 0); 1762 pvo->pvo_pmap->pm_evictions--; 1763 } 1764 1765 /* 1766 * Account for executable mappings. 1767 */ 1768 if (PVO_EXECUTABLE_P(pvo)) 1769 pvo_clear_exec(pvo); 1770 1771 /* 1772 * Update our statistics. 1773 */ 1774 pvo->pvo_pmap->pm_stats.resident_count--; 1775 if (PVO_WIRED_P(pvo)) 1776 pvo->pvo_pmap->pm_stats.wired_count--; 1777 1778 /* 1779 * Save the REF/CHG bits into their cache if the page is managed. 1780 */ 1781 if (PVO_MANAGED_P(pvo)) { 1782 register_t ptelo = pvo->pvo_pte.pte_lo; 1783 struct vm_page *pg = PHYS_TO_VM_PAGE(ptelo & PTE_RPGN); 1784 1785 if (pg != NULL) { 1786 /* 1787 * If this page was changed and it is mapped exec, 1788 * invalidate it. 1789 */ 1790 if ((ptelo & PTE_CHG) && 1791 (pmap_attr_fetch(pg) & PTE_EXEC)) { 1792 struct pvo_head *pvoh = vm_page_to_pvoh(pg); 1793 if (LIST_EMPTY(pvoh)) { 1794 DPRINTFN(EXEC, "[pmap_pvo_remove: " 1795 "%#" _PRIxpa ": clear-exec]\n", 1796 VM_PAGE_TO_PHYS(pg)); 1797 pmap_attr_clear(pg, PTE_EXEC); 1798 PMAPCOUNT(exec_uncached_pvo_remove); 1799 } else { 1800 DPRINTFN(EXEC, "[pmap_pvo_remove: " 1801 "%#" _PRIxpa ": syncicache]\n", 1802 VM_PAGE_TO_PHYS(pg)); 1803 pmap_syncicache(VM_PAGE_TO_PHYS(pg), 1804 PAGE_SIZE); 1805 PMAPCOUNT(exec_synced_pvo_remove); 1806 } 1807 } 1808 1809 pmap_attr_save(pg, ptelo & (PTE_REF|PTE_CHG)); 1810 } 1811 PMAPCOUNT(unmappings); 1812 } else { 1813 PMAPCOUNT(kernel_unmappings); 1814 } 1815 1816 /* 1817 * Remove the PVO from its lists and return it to the pool. 1818 */ 1819 LIST_REMOVE(pvo, pvo_vlink); 1820 TAILQ_REMOVE(&pmap_pvo_table[ptegidx], pvo, pvo_olink); 1821 if (pvol) { 1822 LIST_INSERT_HEAD(pvol, pvo, pvo_vlink); 1823 } 1824 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 1825 pmap_pvo_remove_depth--; 1826 #endif 1827 } 1828 1829 void 1830 pmap_pvo_free(struct pvo_entry *pvo) 1831 { 1832 1833 pool_put(pmap_pvo_pl(pvo), pvo); 1834 } 1835 1836 void 1837 pmap_pvo_free_list(struct pvo_head *pvol) 1838 { 1839 struct pvo_entry *pvo, *npvo; 1840 1841 for (pvo = LIST_FIRST(pvol); pvo != NULL; pvo = npvo) { 1842 npvo = LIST_NEXT(pvo, pvo_vlink); 1843 LIST_REMOVE(pvo, pvo_vlink); 1844 pmap_pvo_free(pvo); 1845 } 1846 } 1847 1848 /* 1849 * Mark a mapping as executable. 1850 * If this is the first executable mapping in the segment, 1851 * clear the noexec flag. 1852 */ 1853 static void 1854 pvo_set_exec(struct pvo_entry *pvo) 1855 { 1856 struct pmap *pm = pvo->pvo_pmap; 1857 1858 if (pm == pmap_kernel() || PVO_EXECUTABLE_P(pvo)) { 1859 return; 1860 } 1861 pvo->pvo_vaddr |= PVO_EXECUTABLE; 1862 #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE) 1863 { 1864 int sr = PVO_VADDR(pvo) >> ADDR_SR_SHFT; 1865 if (pm->pm_exec[sr]++ == 0) { 1866 pm->pm_sr[sr] &= ~SR_NOEXEC; 1867 } 1868 } 1869 #endif 1870 } 1871 1872 /* 1873 * Mark a mapping as non-executable. 1874 * If this was the last executable mapping in the segment, 1875 * set the noexec flag. 
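 *
 * pm_exec[] holds a per-segment count of executable mappings that mirrors
 * SR_NOEXEC in pm_sr[]: pvo_set_exec() clears the bit on the 0 -> 1
 * transition and this routine sets it again on the 1 -> 0 transition.
 * The kernel pmap is left alone by both.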
1876 */ 1877 static void 1878 pvo_clear_exec(struct pvo_entry *pvo) 1879 { 1880 struct pmap *pm = pvo->pvo_pmap; 1881 1882 if (pm == pmap_kernel() || !PVO_EXECUTABLE_P(pvo)) { 1883 return; 1884 } 1885 pvo->pvo_vaddr &= ~PVO_EXECUTABLE; 1886 #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE) 1887 { 1888 int sr = PVO_VADDR(pvo) >> ADDR_SR_SHFT; 1889 if (--pm->pm_exec[sr] == 0) { 1890 pm->pm_sr[sr] |= SR_NOEXEC; 1891 } 1892 } 1893 #endif 1894 } 1895 1896 /* 1897 * Insert physical page at pa into the given pmap at virtual address va. 1898 */ 1899 int 1900 pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) 1901 { 1902 struct mem_region *mp; 1903 struct pvo_head *pvo_head; 1904 struct vm_page *pg; 1905 struct pool *pl; 1906 register_t pte_lo; 1907 int error; 1908 u_int was_exec = 0; 1909 1910 PMAP_LOCK(); 1911 1912 if (__predict_false(!pmap_initialized)) { 1913 pvo_head = &pmap_pvo_kunmanaged; 1914 pl = &pmap_upvo_pool; 1915 pg = NULL; 1916 was_exec = PTE_EXEC; 1917 } else { 1918 pvo_head = pa_to_pvoh(pa, &pg); 1919 pl = &pmap_mpvo_pool; 1920 } 1921 1922 DPRINTFN(ENTER, 1923 "pmap_enter(%p, %#" _PRIxva ", %#" _PRIxpa ", 0x%x, 0x%x):", 1924 pm, va, pa, prot, flags); 1925 1926 /* 1927 * If this is a managed page, and it's the first reference to the 1928 * page clear the execness of the page. Otherwise fetch the execness. 1929 */ 1930 if (pg != NULL) 1931 was_exec = pmap_attr_fetch(pg) & PTE_EXEC; 1932 1933 DPRINTFN(ENTER, " was_exec=%d", was_exec); 1934 1935 /* 1936 * Assume the page is cache inhibited and access is guarded unless 1937 * it's in our available memory array. If it is in the memory array, 1938 * asssume it's in memory coherent memory. 1939 */ 1940 if (flags & PMAP_MD_PREFETCHABLE) { 1941 pte_lo = 0; 1942 } else 1943 pte_lo = PTE_G; 1944 1945 if ((flags & PMAP_NOCACHE) == 0) { 1946 for (mp = mem; mp->size; mp++) { 1947 if (pa >= mp->start && pa < mp->start + mp->size) { 1948 pte_lo = PTE_M; 1949 break; 1950 } 1951 } 1952 #ifdef MULTIPROCESSOR 1953 if (((mfpvr() >> 16) & 0xffff) == MPC603e) 1954 pte_lo = PTE_M; 1955 #endif 1956 } else { 1957 pte_lo |= PTE_I; 1958 } 1959 1960 if (prot & VM_PROT_WRITE) 1961 pte_lo |= PTE_BW; 1962 else 1963 pte_lo |= PTE_BR; 1964 1965 /* 1966 * If this was in response to a fault, "pre-fault" the PTE's 1967 * changed/referenced bit appropriately. 1968 */ 1969 if (flags & VM_PROT_WRITE) 1970 pte_lo |= PTE_CHG; 1971 if (flags & VM_PROT_ALL) 1972 pte_lo |= PTE_REF; 1973 1974 /* 1975 * We need to know if this page can be executable 1976 */ 1977 flags |= (prot & VM_PROT_EXECUTE); 1978 1979 /* 1980 * Record mapping for later back-translation and pte spilling. 1981 * This will overwrite any existing mapping. 1982 */ 1983 error = pmap_pvo_enter(pm, pl, pvo_head, va, pa, pte_lo, flags); 1984 1985 /* 1986 * Flush the real page from the instruction cache if this page is 1987 * mapped executable and cacheable and has not been flushed since 1988 * the last time it was modified. 
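	 * The cached PTE_EXEC attribute serves as the "already synced"
	 * marker: it is set below once the page has been flushed, and is
	 * cleared again elsewhere (e.g. pmap_page_protect, pmap_clear_bit)
	 * when the page is recycled or seen modified.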
1989 */ 1990 if (error == 0 && 1991 (flags & VM_PROT_EXECUTE) && 1992 (pte_lo & PTE_I) == 0 && 1993 was_exec == 0) { 1994 DPRINTFN(ENTER, " %s", "syncicache"); 1995 PMAPCOUNT(exec_synced); 1996 pmap_syncicache(pa, PAGE_SIZE); 1997 if (pg != NULL) { 1998 pmap_attr_save(pg, PTE_EXEC); 1999 PMAPCOUNT(exec_cached); 2000 #if defined(DEBUG) || defined(PMAPDEBUG) 2001 if (pmapdebug & PMAPDEBUG_ENTER) 2002 printf(" marked-as-exec"); 2003 else if (pmapdebug & PMAPDEBUG_EXEC) 2004 printf("[pmap_enter: %#" _PRIxpa ": marked-as-exec]\n", 2005 VM_PAGE_TO_PHYS(pg)); 2006 2007 #endif 2008 } 2009 } 2010 2011 DPRINTFN(ENTER, ": error=%d\n", error); 2012 2013 PMAP_UNLOCK(); 2014 2015 return error; 2016 } 2017 2018 void 2019 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) 2020 { 2021 struct mem_region *mp; 2022 register_t pte_lo; 2023 int error; 2024 2025 #if defined (PMAP_OEA64_BRIDGE) || defined (PMAP_OEA) 2026 if (va < VM_MIN_KERNEL_ADDRESS) 2027 panic("pmap_kenter_pa: attempt to enter " 2028 "non-kernel address %#" _PRIxva "!", va); 2029 #endif 2030 2031 DPRINTFN(KENTER, 2032 "pmap_kenter_pa(%#" _PRIxva ",%#" _PRIxpa ",%#x)\n", va, pa, prot); 2033 2034 PMAP_LOCK(); 2035 2036 /* 2037 * Assume the page is cache inhibited and access is guarded unless 2038 * it's in our available memory array. If it is in the memory array, 2039 * asssume it's in memory coherent memory. 2040 */ 2041 pte_lo = PTE_IG; 2042 if ((flags & PMAP_NOCACHE) == 0) { 2043 for (mp = mem; mp->size; mp++) { 2044 if (pa >= mp->start && pa < mp->start + mp->size) { 2045 pte_lo = PTE_M; 2046 break; 2047 } 2048 } 2049 #ifdef MULTIPROCESSOR 2050 if (((mfpvr() >> 16) & 0xffff) == MPC603e) 2051 pte_lo = PTE_M; 2052 #endif 2053 } 2054 2055 if (prot & VM_PROT_WRITE) 2056 pte_lo |= PTE_BW; 2057 else 2058 pte_lo |= PTE_BR; 2059 2060 /* 2061 * We don't care about REF/CHG on PVOs on the unmanaged list. 2062 */ 2063 error = pmap_pvo_enter(pmap_kernel(), &pmap_upvo_pool, 2064 &pmap_pvo_kunmanaged, va, pa, pte_lo, prot|PMAP_WIRED); 2065 2066 if (error != 0) 2067 panic("pmap_kenter_pa: failed to enter va %#" _PRIxva " pa %#" _PRIxpa ": %d", 2068 va, pa, error); 2069 2070 PMAP_UNLOCK(); 2071 } 2072 2073 void 2074 pmap_kremove(vaddr_t va, vsize_t len) 2075 { 2076 if (va < VM_MIN_KERNEL_ADDRESS) 2077 panic("pmap_kremove: attempt to remove " 2078 "non-kernel address %#" _PRIxva "!", va); 2079 2080 DPRINTFN(KREMOVE, "pmap_kremove(%#" _PRIxva ",%#" _PRIxva ")\n", va, len); 2081 pmap_remove(pmap_kernel(), va, va + len); 2082 } 2083 2084 /* 2085 * Remove the given range of mapping entries. 2086 */ 2087 void 2088 pmap_remove(pmap_t pm, vaddr_t va, vaddr_t endva) 2089 { 2090 struct pvo_head pvol; 2091 struct pvo_entry *pvo; 2092 register_t msr; 2093 int pteidx; 2094 2095 PMAP_LOCK(); 2096 LIST_INIT(&pvol); 2097 msr = pmap_interrupts_off(); 2098 for (; va < endva; va += PAGE_SIZE) { 2099 pvo = pmap_pvo_find_va(pm, va, &pteidx); 2100 if (pvo != NULL) { 2101 pmap_pvo_remove(pvo, pteidx, &pvol); 2102 } 2103 } 2104 pmap_interrupts_restore(msr); 2105 pmap_pvo_free_list(&pvol); 2106 PMAP_UNLOCK(); 2107 } 2108 2109 /* 2110 * Get the physical page address for the given pmap/virtual address. 2111 */ 2112 bool 2113 pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap) 2114 { 2115 struct pvo_entry *pvo; 2116 register_t msr; 2117 2118 PMAP_LOCK(); 2119 2120 /* 2121 * If this is a kernel pmap lookup, also check the battable 2122 * and if we get a hit, translate the VA to a PA using the 2123 * BAT entries. 
Don't check for VM_MAX_KERNEL_ADDRESS if
2124 	 * that would wrap back to 0 (i.e. KERNEL2_SR == 15).
2125 	 */
2126 	if (pm == pmap_kernel() &&
2127 	    (va < VM_MIN_KERNEL_ADDRESS ||
2128 	     (KERNEL2_SR < 15 && VM_MAX_KERNEL_ADDRESS <= va))) {
2129 		KASSERT((va >> ADDR_SR_SHFT) != USER_SR);
2130 #if defined (PMAP_OEA)
2131 #ifdef PPC_OEA601
2132 		if ((MFPVR() >> 16) == MPC601) {
2133 			register_t batu = battable[va >> 23].batu;
2134 			register_t batl = battable[va >> 23].batl;
2135 			register_t sr = iosrtable[va >> ADDR_SR_SHFT];
2136 			if (BAT601_VALID_P(batl) &&
2137 			    BAT601_VA_MATCH_P(batu, batl, va)) {
2138 				register_t mask =
2139 				    (~(batl & BAT601_BSM) << 17) & ~0x1ffffL;
2140 				if (pap)
2141 					*pap = (batl & mask) | (va & ~mask);
2142 				PMAP_UNLOCK();
2143 				return true;
2144 			} else if (SR601_VALID_P(sr) &&
2145 				   SR601_PA_MATCH_P(sr, va)) {
2146 				if (pap)
2147 					*pap = va;
2148 				PMAP_UNLOCK();
2149 				return true;
2150 			}
2151 		} else
2152 #endif /* PPC_OEA601 */
2153 		{
2154 			register_t batu = battable[BAT_VA2IDX(va)].batu;
2155 			if (BAT_VALID_P(batu,0) && BAT_VA_MATCH_P(batu,va)) {
2156 				register_t batl = battable[BAT_VA2IDX(va)].batl;
2157 				register_t mask =
2158 				    (~(batu & (BAT_XBL|BAT_BL)) << 15) & ~0x1ffffL;
2159 				if (pap)
2160 					*pap = (batl & mask) | (va & ~mask);
2161 				PMAP_UNLOCK();
2162 				return true;
2163 			}
2164 		}
2165 		return false;
2166 #elif defined (PMAP_OEA64_BRIDGE)
2167 	if (va >= SEGMENT_LENGTH)
2168 		panic("%s: pm: %s va >= SEGMENT_LENGTH, va: 0x%08lx\n",
2169 		    __func__, (pm == pmap_kernel() ? "kernel" : "user"), va);
2170 	else {
2171 		if (pap)
2172 			*pap = va;
2173 		PMAP_UNLOCK();
2174 		return true;
2175 	}
2176 #elif defined (PMAP_OEA64)
2177 #error PPC_OEA64 not supported
2178 #endif /* PPC_OEA */
2179 	}
2180 
2181 	msr = pmap_interrupts_off();
2182 	pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
2183 	if (pvo != NULL) {
2184 		PMAP_PVO_CHECK(pvo);		/* sanity check */
2185 		if (pap)
2186 			*pap = (pvo->pvo_pte.pte_lo & PTE_RPGN)
2187 			    | (va & ADDR_POFF);
2188 	}
2189 	pmap_interrupts_restore(msr);
2190 	PMAP_UNLOCK();
2191 	return pvo != NULL;
2192 }
2193 
2194 /*
2195  * Lower the protection on the specified range of this pmap.
2196  */
2197 void
2198 pmap_protect(pmap_t pm, vaddr_t va, vaddr_t endva, vm_prot_t prot)
2199 {
2200 	struct pvo_entry *pvo;
2201 	volatile struct pte *pt;
2202 	register_t msr;
2203 	int pteidx;
2204 
2205 	/*
2206 	 * Since this routine only downgrades protection, we should
2207 	 * always be called with at least one bit not set.
2208 	 */
2209 	KASSERT(prot != VM_PROT_ALL);
2210 
2211 	/*
2212 	 * If there is no read protection, this is equivalent to
2213 	 * removing the range from the pmap.
2214 	 */
2215 	if ((prot & VM_PROT_READ) == 0) {
2216 		pmap_remove(pm, va, endva);
2217 		return;
2218 	}
2219 
2220 	PMAP_LOCK();
2221 
2222 	msr = pmap_interrupts_off();
2223 	for (; va < endva; va += PAGE_SIZE) {
2224 		pvo = pmap_pvo_find_va(pm, va, &pteidx);
2225 		if (pvo == NULL)
2226 			continue;
2227 		PMAP_PVO_CHECK(pvo);		/* sanity check */
2228 
2229 		/*
2230 		 * Revoke executable if asked to do so.
2231 		 */
2232 		if ((prot & VM_PROT_EXECUTE) == 0)
2233 			pvo_clear_exec(pvo);
2234 
2235 #if 0
2236 		/*
2237 		 * If the page is already read-only, no change
2238 		 * needs to be made.
2239 		 */
2240 		if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR)
2241 			continue;
2242 #endif
2243 		/*
2244 		 * Grab the PTE pointer before we diddle with
2245 		 * the cached PTE copy.
2246 		 */
2247 		pt = pmap_pvo_to_pte(pvo, pteidx);
2248 		/*
2249 		 * Change the protection of the page.
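		 * The cached copy is downgraded to read-only (PTE_BR) first;
		 * the hardware PTE, if resident, is updated to match below.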
2250 */ 2251 pvo->pvo_pte.pte_lo &= ~PTE_PP; 2252 pvo->pvo_pte.pte_lo |= PTE_BR; 2253 2254 /* 2255 * If the PVO is in the page table, update 2256 * that pte at well. 2257 */ 2258 if (pt != NULL) { 2259 pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 2260 PVO_WHERE(pvo, PMAP_PROTECT); 2261 PMAPCOUNT(ptes_changed); 2262 } 2263 2264 PMAP_PVO_CHECK(pvo); /* sanity check */ 2265 } 2266 pmap_interrupts_restore(msr); 2267 PMAP_UNLOCK(); 2268 } 2269 2270 void 2271 pmap_unwire(pmap_t pm, vaddr_t va) 2272 { 2273 struct pvo_entry *pvo; 2274 register_t msr; 2275 2276 PMAP_LOCK(); 2277 msr = pmap_interrupts_off(); 2278 pvo = pmap_pvo_find_va(pm, va, NULL); 2279 if (pvo != NULL) { 2280 if (PVO_WIRED_P(pvo)) { 2281 pvo->pvo_vaddr &= ~PVO_WIRED; 2282 pm->pm_stats.wired_count--; 2283 } 2284 PMAP_PVO_CHECK(pvo); /* sanity check */ 2285 } 2286 pmap_interrupts_restore(msr); 2287 PMAP_UNLOCK(); 2288 } 2289 2290 /* 2291 * Lower the protection on the specified physical page. 2292 */ 2293 void 2294 pmap_page_protect(struct vm_page *pg, vm_prot_t prot) 2295 { 2296 struct pvo_head *pvo_head, pvol; 2297 struct pvo_entry *pvo, *next_pvo; 2298 volatile struct pte *pt; 2299 register_t msr; 2300 2301 PMAP_LOCK(); 2302 2303 KASSERT(prot != VM_PROT_ALL); 2304 LIST_INIT(&pvol); 2305 msr = pmap_interrupts_off(); 2306 2307 /* 2308 * When UVM reuses a page, it does a pmap_page_protect with 2309 * VM_PROT_NONE. At that point, we can clear the exec flag 2310 * since we know the page will have different contents. 2311 */ 2312 if ((prot & VM_PROT_READ) == 0) { 2313 DPRINTFN(EXEC, "[pmap_page_protect: %#" _PRIxpa ": clear-exec]\n", 2314 VM_PAGE_TO_PHYS(pg)); 2315 if (pmap_attr_fetch(pg) & PTE_EXEC) { 2316 PMAPCOUNT(exec_uncached_page_protect); 2317 pmap_attr_clear(pg, PTE_EXEC); 2318 } 2319 } 2320 2321 pvo_head = vm_page_to_pvoh(pg); 2322 for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) { 2323 next_pvo = LIST_NEXT(pvo, pvo_vlink); 2324 PMAP_PVO_CHECK(pvo); /* sanity check */ 2325 2326 /* 2327 * Downgrading to no mapping at all, we just remove the entry. 2328 */ 2329 if ((prot & VM_PROT_READ) == 0) { 2330 pmap_pvo_remove(pvo, -1, &pvol); 2331 continue; 2332 } 2333 2334 /* 2335 * If EXEC permission is being revoked, just clear the 2336 * flag in the PVO. 2337 */ 2338 if ((prot & VM_PROT_EXECUTE) == 0) 2339 pvo_clear_exec(pvo); 2340 2341 /* 2342 * If this entry is already RO, don't diddle with the 2343 * page table. 2344 */ 2345 if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) { 2346 PMAP_PVO_CHECK(pvo); 2347 continue; 2348 } 2349 2350 /* 2351 * Grab the PTE before the we diddle the bits so 2352 * pvo_to_pte can verify the pte contents are as 2353 * expected. 2354 */ 2355 pt = pmap_pvo_to_pte(pvo, -1); 2356 pvo->pvo_pte.pte_lo &= ~PTE_PP; 2357 pvo->pvo_pte.pte_lo |= PTE_BR; 2358 if (pt != NULL) { 2359 pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 2360 PVO_WHERE(pvo, PMAP_PAGE_PROTECT); 2361 PMAPCOUNT(ptes_changed); 2362 } 2363 PMAP_PVO_CHECK(pvo); /* sanity check */ 2364 } 2365 pmap_interrupts_restore(msr); 2366 pmap_pvo_free_list(&pvol); 2367 2368 PMAP_UNLOCK(); 2369 } 2370 2371 /* 2372 * Activate the address space for the specified process. If the process 2373 * is the current process, load the new MMU context. 2374 */ 2375 void 2376 pmap_activate(struct lwp *l) 2377 { 2378 struct pcb *pcb = lwp_getpcb(l); 2379 pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap; 2380 2381 DPRINTFN(ACTIVATE, 2382 "pmap_activate: lwp %p (curlwp %p)\n", l, curlwp); 2383 2384 /* 2385 * XXX Normally performed in cpu_lwp_fork(). 
2386 */ 2387 pcb->pcb_pm = pmap; 2388 2389 /* 2390 * In theory, the SR registers need only be valid on return 2391 * to user space wait to do them there. 2392 */ 2393 if (l == curlwp) { 2394 /* Store pointer to new current pmap. */ 2395 curpm = pmap; 2396 } 2397 } 2398 2399 /* 2400 * Deactivate the specified process's address space. 2401 */ 2402 void 2403 pmap_deactivate(struct lwp *l) 2404 { 2405 } 2406 2407 bool 2408 pmap_query_bit(struct vm_page *pg, int ptebit) 2409 { 2410 struct pvo_entry *pvo; 2411 volatile struct pte *pt; 2412 register_t msr; 2413 2414 PMAP_LOCK(); 2415 2416 if (pmap_attr_fetch(pg) & ptebit) { 2417 PMAP_UNLOCK(); 2418 return true; 2419 } 2420 2421 msr = pmap_interrupts_off(); 2422 LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) { 2423 PMAP_PVO_CHECK(pvo); /* sanity check */ 2424 /* 2425 * See if we saved the bit off. If so cache, it and return 2426 * success. 2427 */ 2428 if (pvo->pvo_pte.pte_lo & ptebit) { 2429 pmap_attr_save(pg, ptebit); 2430 PMAP_PVO_CHECK(pvo); /* sanity check */ 2431 pmap_interrupts_restore(msr); 2432 PMAP_UNLOCK(); 2433 return true; 2434 } 2435 } 2436 /* 2437 * No luck, now go thru the hard part of looking at the ptes 2438 * themselves. Sync so any pending REF/CHG bits are flushed 2439 * to the PTEs. 2440 */ 2441 SYNC(); 2442 LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) { 2443 PMAP_PVO_CHECK(pvo); /* sanity check */ 2444 /* 2445 * See if this pvo have a valid PTE. If so, fetch the 2446 * REF/CHG bits from the valid PTE. If the appropriate 2447 * ptebit is set, cache, it and return success. 2448 */ 2449 pt = pmap_pvo_to_pte(pvo, -1); 2450 if (pt != NULL) { 2451 pmap_pte_synch(pt, &pvo->pvo_pte); 2452 if (pvo->pvo_pte.pte_lo & ptebit) { 2453 pmap_attr_save(pg, ptebit); 2454 PMAP_PVO_CHECK(pvo); /* sanity check */ 2455 pmap_interrupts_restore(msr); 2456 PMAP_UNLOCK(); 2457 return true; 2458 } 2459 } 2460 } 2461 pmap_interrupts_restore(msr); 2462 PMAP_UNLOCK(); 2463 return false; 2464 } 2465 2466 bool 2467 pmap_clear_bit(struct vm_page *pg, int ptebit) 2468 { 2469 struct pvo_head *pvoh = vm_page_to_pvoh(pg); 2470 struct pvo_entry *pvo; 2471 volatile struct pte *pt; 2472 register_t msr; 2473 int rv = 0; 2474 2475 PMAP_LOCK(); 2476 msr = pmap_interrupts_off(); 2477 2478 /* 2479 * Fetch the cache value 2480 */ 2481 rv |= pmap_attr_fetch(pg); 2482 2483 /* 2484 * Clear the cached value. 2485 */ 2486 pmap_attr_clear(pg, ptebit); 2487 2488 /* 2489 * Sync so any pending REF/CHG bits are flushed to the PTEs (so we 2490 * can reset the right ones). Note that since the pvo entries and 2491 * list heads are accessed via BAT0 and are never placed in the 2492 * page table, we don't have to worry about further accesses setting 2493 * the REF/CHG bits. 2494 */ 2495 SYNC(); 2496 2497 /* 2498 * For each pvo entry, clear pvo's ptebit. If this pvo have a 2499 * valid PTE. If so, clear the ptebit from the valid PTE. 2500 */ 2501 LIST_FOREACH(pvo, pvoh, pvo_vlink) { 2502 PMAP_PVO_CHECK(pvo); /* sanity check */ 2503 pt = pmap_pvo_to_pte(pvo, -1); 2504 if (pt != NULL) { 2505 /* 2506 * Only sync the PTE if the bit we are looking 2507 * for is not already set. 2508 */ 2509 if ((pvo->pvo_pte.pte_lo & ptebit) == 0) 2510 pmap_pte_synch(pt, &pvo->pvo_pte); 2511 /* 2512 * If the bit we are looking for was already set, 2513 * clear that bit in the pte. 
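			 * The REF/CHG state of each mapping is also accumulated
			 * into 'rv' below so the caller can report whether the
			 * bit was set in any mapping of the page.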
2514 */ 2515 if (pvo->pvo_pte.pte_lo & ptebit) 2516 pmap_pte_clear(pt, PVO_VADDR(pvo), ptebit); 2517 } 2518 rv |= pvo->pvo_pte.pte_lo & (PTE_CHG|PTE_REF); 2519 pvo->pvo_pte.pte_lo &= ~ptebit; 2520 PMAP_PVO_CHECK(pvo); /* sanity check */ 2521 } 2522 pmap_interrupts_restore(msr); 2523 2524 /* 2525 * If we are clearing the modify bit and this page was marked EXEC 2526 * and the user of the page thinks the page was modified, then we 2527 * need to clean it from the icache if it's mapped or clear the EXEC 2528 * bit if it's not mapped. The page itself might not have the CHG 2529 * bit set if the modification was done via DMA to the page. 2530 */ 2531 if ((ptebit & PTE_CHG) && (rv & PTE_EXEC)) { 2532 if (LIST_EMPTY(pvoh)) { 2533 DPRINTFN(EXEC, "[pmap_clear_bit: %#" _PRIxpa ": clear-exec]\n", 2534 VM_PAGE_TO_PHYS(pg)); 2535 pmap_attr_clear(pg, PTE_EXEC); 2536 PMAPCOUNT(exec_uncached_clear_modify); 2537 } else { 2538 DPRINTFN(EXEC, "[pmap_clear_bit: %#" _PRIxpa ": syncicache]\n", 2539 VM_PAGE_TO_PHYS(pg)); 2540 pmap_syncicache(VM_PAGE_TO_PHYS(pg), PAGE_SIZE); 2541 PMAPCOUNT(exec_synced_clear_modify); 2542 } 2543 } 2544 PMAP_UNLOCK(); 2545 return (rv & ptebit) != 0; 2546 } 2547 2548 void 2549 pmap_procwr(struct proc *p, vaddr_t va, size_t len) 2550 { 2551 struct pvo_entry *pvo; 2552 size_t offset = va & ADDR_POFF; 2553 int s; 2554 2555 PMAP_LOCK(); 2556 s = splvm(); 2557 while (len > 0) { 2558 size_t seglen = PAGE_SIZE - offset; 2559 if (seglen > len) 2560 seglen = len; 2561 pvo = pmap_pvo_find_va(p->p_vmspace->vm_map.pmap, va, NULL); 2562 if (pvo != NULL && PVO_EXECUTABLE_P(pvo)) { 2563 pmap_syncicache( 2564 (pvo->pvo_pte.pte_lo & PTE_RPGN) | offset, seglen); 2565 PMAP_PVO_CHECK(pvo); 2566 } 2567 va += seglen; 2568 len -= seglen; 2569 offset = 0; 2570 } 2571 splx(s); 2572 PMAP_UNLOCK(); 2573 } 2574 2575 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB) 2576 void 2577 pmap_pte_print(volatile struct pte *pt) 2578 { 2579 printf("PTE %p: ", pt); 2580 2581 #if defined(PMAP_OEA) 2582 /* High word: */ 2583 printf("%#" _PRIxpte ": [", pt->pte_hi); 2584 #else 2585 printf("%#" _PRIxpte ": [", pt->pte_hi); 2586 #endif /* PMAP_OEA */ 2587 2588 printf("%c ", (pt->pte_hi & PTE_VALID) ? 'v' : 'i'); 2589 printf("%c ", (pt->pte_hi & PTE_HID) ? 'h' : '-'); 2590 2591 printf("%#" _PRIxpte " %#" _PRIxpte "", 2592 (pt->pte_hi &~ PTE_VALID)>>PTE_VSID_SHFT, 2593 pt->pte_hi & PTE_API); 2594 #if defined(PMAP_OEA) || defined(PMAP_OEA64_BRIDGE) 2595 printf(" (va %#" _PRIxva ")] ", pmap_pte_to_va(pt)); 2596 #else 2597 printf(" (va %#" _PRIxva ")] ", pmap_pte_to_va(pt)); 2598 #endif /* PMAP_OEA */ 2599 2600 /* Low word: */ 2601 #if defined (PMAP_OEA) 2602 printf(" %#" _PRIxpte ": [", pt->pte_lo); 2603 printf("%#" _PRIxpte "... ", pt->pte_lo >> 12); 2604 #else 2605 printf(" %#" _PRIxpte ": [", pt->pte_lo); 2606 printf("%#" _PRIxpte "... ", pt->pte_lo >> 12); 2607 #endif 2608 printf("%c ", (pt->pte_lo & PTE_REF) ? 'r' : 'u'); 2609 printf("%c ", (pt->pte_lo & PTE_CHG) ? 'c' : 'n'); 2610 printf("%c", (pt->pte_lo & PTE_W) ? 'w' : '.'); 2611 printf("%c", (pt->pte_lo & PTE_I) ? 'i' : '.'); 2612 printf("%c", (pt->pte_lo & PTE_M) ? 'm' : '.'); 2613 printf("%c ", (pt->pte_lo & PTE_G) ? 
'g' : '.'); 2614 switch (pt->pte_lo & PTE_PP) { 2615 case PTE_BR: printf("br]\n"); break; 2616 case PTE_BW: printf("bw]\n"); break; 2617 case PTE_SO: printf("so]\n"); break; 2618 case PTE_SW: printf("sw]\n"); break; 2619 } 2620 } 2621 #endif 2622 2623 #if defined(DDB) 2624 void 2625 pmap_pteg_check(void) 2626 { 2627 volatile struct pte *pt; 2628 int i; 2629 int ptegidx; 2630 u_int p_valid = 0; 2631 u_int s_valid = 0; 2632 u_int invalid = 0; 2633 2634 for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) { 2635 for (pt = pmap_pteg_table[ptegidx].pt, i = 8; --i >= 0; pt++) { 2636 if (pt->pte_hi & PTE_VALID) { 2637 if (pt->pte_hi & PTE_HID) 2638 s_valid++; 2639 else 2640 { 2641 p_valid++; 2642 } 2643 } else 2644 invalid++; 2645 } 2646 } 2647 printf("pteg_check: v(p) %#x (%d), v(s) %#x (%d), i %#x (%d)\n", 2648 p_valid, p_valid, s_valid, s_valid, 2649 invalid, invalid); 2650 } 2651 2652 void 2653 pmap_print_mmuregs(void) 2654 { 2655 int i; 2656 #if defined (PMAP_OEA) || defined (PMAP_OEA_BRIDGE) 2657 u_int cpuvers; 2658 #endif 2659 #ifndef PMAP_OEA64 2660 vaddr_t addr; 2661 register_t soft_sr[16]; 2662 #endif 2663 #if defined (PMAP_OEA) || defined (PMAP_OEA_BRIDGE) 2664 struct bat soft_ibat[4]; 2665 struct bat soft_dbat[4]; 2666 #endif 2667 paddr_t sdr1; 2668 2669 #if defined (PMAP_OEA) || defined (PMAP_OEA_BRIDGE) 2670 cpuvers = MFPVR() >> 16; 2671 #endif 2672 __asm volatile ("mfsdr1 %0" : "=r"(sdr1)); 2673 #ifndef PMAP_OEA64 2674 addr = 0; 2675 for (i = 0; i < 16; i++) { 2676 soft_sr[i] = MFSRIN(addr); 2677 addr += (1 << ADDR_SR_SHFT); 2678 } 2679 #endif 2680 2681 #if defined (PMAP_OEA) || defined (PMAP_OEA_BRIDGE) 2682 /* read iBAT (601: uBAT) registers */ 2683 __asm volatile ("mfibatu %0,0" : "=r"(soft_ibat[0].batu)); 2684 __asm volatile ("mfibatl %0,0" : "=r"(soft_ibat[0].batl)); 2685 __asm volatile ("mfibatu %0,1" : "=r"(soft_ibat[1].batu)); 2686 __asm volatile ("mfibatl %0,1" : "=r"(soft_ibat[1].batl)); 2687 __asm volatile ("mfibatu %0,2" : "=r"(soft_ibat[2].batu)); 2688 __asm volatile ("mfibatl %0,2" : "=r"(soft_ibat[2].batl)); 2689 __asm volatile ("mfibatu %0,3" : "=r"(soft_ibat[3].batu)); 2690 __asm volatile ("mfibatl %0,3" : "=r"(soft_ibat[3].batl)); 2691 2692 2693 if (cpuvers != MPC601) { 2694 /* read dBAT registers */ 2695 __asm volatile ("mfdbatu %0,0" : "=r"(soft_dbat[0].batu)); 2696 __asm volatile ("mfdbatl %0,0" : "=r"(soft_dbat[0].batl)); 2697 __asm volatile ("mfdbatu %0,1" : "=r"(soft_dbat[1].batu)); 2698 __asm volatile ("mfdbatl %0,1" : "=r"(soft_dbat[1].batl)); 2699 __asm volatile ("mfdbatu %0,2" : "=r"(soft_dbat[2].batu)); 2700 __asm volatile ("mfdbatl %0,2" : "=r"(soft_dbat[2].batl)); 2701 __asm volatile ("mfdbatu %0,3" : "=r"(soft_dbat[3].batu)); 2702 __asm volatile ("mfdbatl %0,3" : "=r"(soft_dbat[3].batl)); 2703 } 2704 #endif 2705 2706 printf("SDR1:\t%#" _PRIxpa "\n", sdr1); 2707 #ifndef PMAP_OEA64 2708 printf("SR[]:\t"); 2709 for (i = 0; i < 4; i++) 2710 printf("0x%08lx, ", soft_sr[i]); 2711 printf("\n\t"); 2712 for ( ; i < 8; i++) 2713 printf("0x%08lx, ", soft_sr[i]); 2714 printf("\n\t"); 2715 for ( ; i < 12; i++) 2716 printf("0x%08lx, ", soft_sr[i]); 2717 printf("\n\t"); 2718 for ( ; i < 16; i++) 2719 printf("0x%08lx, ", soft_sr[i]); 2720 printf("\n"); 2721 #endif 2722 2723 #if defined(PMAP_OEA) || defined(PMAP_OEA_BRIDGE) 2724 printf("%cBAT[]:\t", cpuvers == MPC601 ? 
'u' : 'i'); 2725 for (i = 0; i < 4; i++) { 2726 printf("0x%08lx 0x%08lx, ", 2727 soft_ibat[i].batu, soft_ibat[i].batl); 2728 if (i == 1) 2729 printf("\n\t"); 2730 } 2731 if (cpuvers != MPC601) { 2732 printf("\ndBAT[]:\t"); 2733 for (i = 0; i < 4; i++) { 2734 printf("0x%08lx 0x%08lx, ", 2735 soft_dbat[i].batu, soft_dbat[i].batl); 2736 if (i == 1) 2737 printf("\n\t"); 2738 } 2739 } 2740 printf("\n"); 2741 #endif /* PMAP_OEA... */ 2742 } 2743 2744 void 2745 pmap_print_pte(pmap_t pm, vaddr_t va) 2746 { 2747 struct pvo_entry *pvo; 2748 volatile struct pte *pt; 2749 int pteidx; 2750 2751 pvo = pmap_pvo_find_va(pm, va, &pteidx); 2752 if (pvo != NULL) { 2753 pt = pmap_pvo_to_pte(pvo, pteidx); 2754 if (pt != NULL) { 2755 printf("VA %#" _PRIxva " -> %p -> %s %#" _PRIxpte ", %#" _PRIxpte "\n", 2756 va, pt, 2757 pt->pte_hi & PTE_HID ? "(sec)" : "(pri)", 2758 pt->pte_hi, pt->pte_lo); 2759 } else { 2760 printf("No valid PTE found\n"); 2761 } 2762 } else { 2763 printf("Address not in pmap\n"); 2764 } 2765 } 2766 2767 void 2768 pmap_pteg_dist(void) 2769 { 2770 struct pvo_entry *pvo; 2771 int ptegidx; 2772 int depth; 2773 int max_depth = 0; 2774 unsigned int depths[64]; 2775 2776 memset(depths, 0, sizeof(depths)); 2777 for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) { 2778 depth = 0; 2779 TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { 2780 depth++; 2781 } 2782 if (depth > max_depth) 2783 max_depth = depth; 2784 if (depth > 63) 2785 depth = 63; 2786 depths[depth]++; 2787 } 2788 2789 for (depth = 0; depth < 64; depth++) { 2790 printf(" [%2d]: %8u", depth, depths[depth]); 2791 if ((depth & 3) == 3) 2792 printf("\n"); 2793 if (depth == max_depth) 2794 break; 2795 } 2796 if ((depth & 3) != 3) 2797 printf("\n"); 2798 printf("Max depth found was %d\n", max_depth); 2799 } 2800 #endif /* DEBUG */ 2801 2802 #if defined(PMAPCHECK) || defined(DEBUG) 2803 void 2804 pmap_pvo_verify(void) 2805 { 2806 int ptegidx; 2807 int s; 2808 2809 s = splvm(); 2810 for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) { 2811 struct pvo_entry *pvo; 2812 TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { 2813 if ((uintptr_t) pvo >= SEGMENT_LENGTH) 2814 panic("pmap_pvo_verify: invalid pvo %p " 2815 "on list %#x", pvo, ptegidx); 2816 pmap_pvo_check(pvo); 2817 } 2818 } 2819 splx(s); 2820 } 2821 #endif /* PMAPCHECK */ 2822 2823 2824 void * 2825 pmap_pool_ualloc(struct pool *pp, int flags) 2826 { 2827 struct pvo_page *pvop; 2828 2829 if (uvm.page_init_done != true) { 2830 return (void *) uvm_pageboot_alloc(PAGE_SIZE); 2831 } 2832 2833 PMAP_LOCK(); 2834 pvop = SIMPLEQ_FIRST(&pmap_upvop_head); 2835 if (pvop != NULL) { 2836 pmap_upvop_free--; 2837 SIMPLEQ_REMOVE_HEAD(&pmap_upvop_head, pvop_link); 2838 PMAP_UNLOCK(); 2839 return pvop; 2840 } 2841 PMAP_UNLOCK(); 2842 return pmap_pool_malloc(pp, flags); 2843 } 2844 2845 void * 2846 pmap_pool_malloc(struct pool *pp, int flags) 2847 { 2848 struct pvo_page *pvop; 2849 struct vm_page *pg; 2850 2851 PMAP_LOCK(); 2852 pvop = SIMPLEQ_FIRST(&pmap_mpvop_head); 2853 if (pvop != NULL) { 2854 pmap_mpvop_free--; 2855 SIMPLEQ_REMOVE_HEAD(&pmap_mpvop_head, pvop_link); 2856 PMAP_UNLOCK(); 2857 return pvop; 2858 } 2859 PMAP_UNLOCK(); 2860 again: 2861 pg = uvm_pagealloc_strat(NULL, 0, NULL, UVM_PGA_USERESERVE, 2862 UVM_PGA_STRAT_ONLY, VM_FREELIST_FIRST256); 2863 if (__predict_false(pg == NULL)) { 2864 if (flags & PR_WAITOK) { 2865 uvm_wait("plpg"); 2866 goto again; 2867 } else { 2868 return (0); 2869 } 2870 } 2871 KDASSERT(VM_PAGE_TO_PHYS(pg) == (uintptr_t)VM_PAGE_TO_PHYS(pg)); 2872 return 
(void *)(uintptr_t) VM_PAGE_TO_PHYS(pg);
2873 }
2874 
2875 void
2876 pmap_pool_ufree(struct pool *pp, void *va)
2877 {
2878 	struct pvo_page *pvop;
2879 #if 0
2880 	if (PHYS_TO_VM_PAGE((paddr_t) va) != NULL) {
2881 		pmap_pool_mfree(va, size, tag);
2882 		return;
2883 	}
2884 #endif
2885 	PMAP_LOCK();
2886 	pvop = va;
2887 	SIMPLEQ_INSERT_HEAD(&pmap_upvop_head, pvop, pvop_link);
2888 	pmap_upvop_free++;
2889 	if (pmap_upvop_free > pmap_upvop_maxfree)
2890 		pmap_upvop_maxfree = pmap_upvop_free;
2891 	PMAP_UNLOCK();
2892 }
2893 
2894 void
2895 pmap_pool_mfree(struct pool *pp, void *va)
2896 {
2897 	struct pvo_page *pvop;
2898 
2899 	PMAP_LOCK();
2900 	pvop = va;
2901 	SIMPLEQ_INSERT_HEAD(&pmap_mpvop_head, pvop, pvop_link);
2902 	pmap_mpvop_free++;
2903 	if (pmap_mpvop_free > pmap_mpvop_maxfree)
2904 		pmap_mpvop_maxfree = pmap_mpvop_free;
2905 	PMAP_UNLOCK();
2906 #if 0
2907 	uvm_pagefree(PHYS_TO_VM_PAGE((paddr_t) va));
2908 #endif
2909 }
2910 
2911 /*
2912  * This routine is used during bootstrapping to steal to-be-managed
2913  * memory (which will then be unmanaged).  We use it to grab memory from
2914  * the first 256MB for our pmap needs and above 256MB for other stuff.
2915  */
2916 vaddr_t
2917 pmap_steal_memory(vsize_t vsize, vaddr_t *vstartp, vaddr_t *vendp)
2918 {
2919 	vsize_t size;
2920 	vaddr_t va;
2921 	paddr_t start, end, pa = 0;
2922 	int npgs, freelist;
2923 	uvm_physseg_t bank;
2924 
2925 	if (uvm.page_init_done == true)
2926 		panic("pmap_steal_memory: called _after_ bootstrap");
2927 
2928 	*vstartp = VM_MIN_KERNEL_ADDRESS;
2929 	*vendp = VM_MAX_KERNEL_ADDRESS;
2930 
2931 	size = round_page(vsize);
2932 	npgs = atop(size);
2933 
2934 	/*
2935 	 * PA 0 will never be among those given to UVM so we can use it
2936 	 * to indicate we couldn't steal any memory.
2937 	 */
2938 
2939 	for (bank = uvm_physseg_get_first();
2940 	     uvm_physseg_valid_p(bank);
2941 	     bank = uvm_physseg_get_next(bank)) {
2942 
2943 		freelist = uvm_physseg_get_free_list(bank);
2944 		start = uvm_physseg_get_start(bank);
2945 		end = uvm_physseg_get_end(bank);
2946 
2947 		if (freelist == VM_FREELIST_FIRST256 &&
2948 		    (end - start) >= npgs) {
2949 			pa = ptoa(start);
2950 			break;
2951 		}
2952 	}
2953 
2954 	if (pa == 0)
2955 		panic("pmap_steal_memory: no appropriate memory to steal!");
2956 
2957 	uvm_physseg_unplug(start, npgs);
2958 
2959 	va = (vaddr_t) pa;
2960 	memset((void *) va, 0, size);
2961 	pmap_pages_stolen += npgs;
2962 #ifdef DEBUG
2963 	if (pmapdebug && npgs > 1) {
2964 		u_int cnt = 0;
2965 		for (bank = uvm_physseg_get_first();
2966 		     uvm_physseg_valid_p(bank);
2967 		     bank = uvm_physseg_get_next(bank)) {
2968 			cnt += uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank);
2969 		}
2970 		printf("pmap_steal_memory: stole %u (total %u) pages (%u left)\n",
2971 		    npgs, pmap_pages_stolen, cnt);
2972 	}
2973 #endif
2974 
2975 	return va;
2976 }
2977 
2978 /*
2979  * Find a chunk of memory with the right size and alignment.
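 * The alignment must be a power of two no smaller than PAGE_SIZE; when
 * at_end is set the block is instead taken from the tail of the last
 * suitable avail[] region.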
2980  */
2981 paddr_t
2982 pmap_boot_find_memory(psize_t size, psize_t alignment, int at_end)
2983 {
2984 	struct mem_region *mp;
2985 	paddr_t s, e;
2986 	int i, j;
2987 
2988 	size = round_page(size);
2989 
2990 	DPRINTFN(BOOT,
2991 	    "pmap_boot_find_memory: size=%#" _PRIxpa ", alignment=%#" _PRIxpa ", at_end=%d",
2992 	    size, alignment, at_end);
2993 
2994 	if (alignment < PAGE_SIZE || (alignment & (alignment-1)) != 0)
2995 		panic("pmap_boot_find_memory: invalid alignment %#" _PRIxpa,
2996 		    alignment);
2997 
2998 	if (at_end) {
2999 		if (alignment != PAGE_SIZE)
3000 			panic("pmap_boot_find_memory: invalid ending "
3001 			    "alignment %#" _PRIxpa, alignment);
3002 
3003 		for (mp = &avail[avail_cnt-1]; mp >= avail; mp--) {
3004 			s = mp->start + mp->size - size;
3005 			if (s >= mp->start && mp->size >= size) {
3006 				DPRINTFN(BOOT, ": %#" _PRIxpa "\n", s);
3007 				DPRINTFN(BOOT,
3008 				    "pmap_boot_find_memory: b-avail[%d] start "
3009 				    "%#" _PRIxpa " size %#" _PRIxpa "\n", mp - avail,
3010 				    mp->start, mp->size);
3011 				mp->size -= size;
3012 				DPRINTFN(BOOT,
3013 				    "pmap_boot_find_memory: a-avail[%d] start "
3014 				    "%#" _PRIxpa " size %#" _PRIxpa "\n", mp - avail,
3015 				    mp->start, mp->size);
3016 				return s;
3017 			}
3018 		}
3019 		panic("pmap_boot_find_memory: no available memory");
3020 	}
3021 
3022 	for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
3023 		s = (mp->start + alignment - 1) & ~(alignment-1);
3024 		e = s + size;
3025 
3026 		/*
3027 		 * Is the calculated block entirely within this region?
3028 		 */
3029 		if (s < mp->start || e > mp->start + mp->size)
3030 			continue;
3031 
3032 		DPRINTFN(BOOT, ": %#" _PRIxpa "\n", s);
3033 		if (s == mp->start) {
3034 			/*
3035 			 * If the block starts at the beginning of the region,
3036 			 * adjust the size & start. (the region may now be
3037 			 * zero in length)
3038 			 */
3039 			DPRINTFN(BOOT,
3040 			    "pmap_boot_find_memory: b-avail[%d] start "
3041 			    "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size);
3042 			mp->start += size;
3043 			mp->size -= size;
3044 			DPRINTFN(BOOT,
3045 			    "pmap_boot_find_memory: a-avail[%d] start "
3046 			    "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size);
3047 		} else if (e == mp->start + mp->size) {
3048 			/*
3049 			 * If the block ends at the end of the region,
3050 			 * adjust only the size.
3051 			 */
3052 			DPRINTFN(BOOT,
3053 			    "pmap_boot_find_memory: b-avail[%d] start "
3054 			    "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size);
3055 			mp->size -= size;
3056 			DPRINTFN(BOOT,
3057 			    "pmap_boot_find_memory: a-avail[%d] start "
3058 			    "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size);
3059 		} else {
3060 			/*
3061 			 * Block is in the middle of the region, so we
3062 			 * have to split it in two.
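			 * The following avail[] entries are shifted up one slot
			 * to make room for the second half of the split region.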
3063 */ 3064 for (j = avail_cnt; j > i + 1; j--) { 3065 avail[j] = avail[j-1]; 3066 } 3067 DPRINTFN(BOOT, 3068 "pmap_boot_find_memory: b-avail[%d] start " 3069 "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size); 3070 mp[1].start = e; 3071 mp[1].size = mp[0].start + mp[0].size - e; 3072 mp[0].size = s - mp[0].start; 3073 avail_cnt++; 3074 for (; i < avail_cnt; i++) { 3075 DPRINTFN(BOOT, 3076 "pmap_boot_find_memory: a-avail[%d] " 3077 "start %#" _PRIxpa " size %#" _PRIxpa "\n", i, 3078 avail[i].start, avail[i].size); 3079 } 3080 } 3081 KASSERT(s == (uintptr_t) s); 3082 return s; 3083 } 3084 panic("pmap_boot_find_memory: not enough memory for " 3085 "%#" _PRIxpa "/%#" _PRIxpa " allocation?", size, alignment); 3086 } 3087 3088 /* XXXSL: we dont have any BATs to do this, map in Segment 0 1:1 using page tables */ 3089 #if defined (PMAP_OEA64_BRIDGE) 3090 int 3091 pmap_setup_segment0_map(int use_large_pages, ...) 3092 { 3093 vaddr_t va, va_end; 3094 3095 register_t pte_lo = 0x0; 3096 int ptegidx = 0; 3097 struct pte pte; 3098 va_list ap; 3099 3100 /* Coherent + Supervisor RW, no user access */ 3101 pte_lo = PTE_M; 3102 3103 /* XXXSL 3104 * Map in 1st segment 1:1, we'll be careful not to spill kernel entries later, 3105 * these have to take priority. 3106 */ 3107 for (va = 0x0; va < SEGMENT_LENGTH; va += 0x1000) { 3108 ptegidx = va_to_pteg(pmap_kernel(), va); 3109 pmap_pte_create(&pte, pmap_kernel(), va, va | pte_lo); 3110 (void)pmap_pte_insert(ptegidx, &pte); 3111 } 3112 3113 va_start(ap, use_large_pages); 3114 while (1) { 3115 paddr_t pa; 3116 size_t size; 3117 3118 va = va_arg(ap, vaddr_t); 3119 3120 if (va == 0) 3121 break; 3122 3123 pa = va_arg(ap, paddr_t); 3124 size = va_arg(ap, size_t); 3125 3126 for (va_end = va + size; va < va_end; va += 0x1000, pa += 0x1000) { 3127 #if 0 3128 printf("%s: Inserting: va: %#" _PRIxva ", pa: %#" _PRIxpa "\n", __func__, va, pa); 3129 #endif 3130 ptegidx = va_to_pteg(pmap_kernel(), va); 3131 pmap_pte_create(&pte, pmap_kernel(), va, pa | pte_lo); 3132 (void)pmap_pte_insert(ptegidx, &pte); 3133 } 3134 } 3135 va_end(ap); 3136 3137 TLBSYNC(); 3138 SYNC(); 3139 return (0); 3140 } 3141 #endif /* PMAP_OEA64_BRIDGE */ 3142 3143 /* 3144 * This is not part of the defined PMAP interface and is specific to the 3145 * PowerPC architecture. This is called during initppc, before the system 3146 * is really initialized. 3147 */ 3148 void 3149 pmap_bootstrap(paddr_t kernelstart, paddr_t kernelend) 3150 { 3151 struct mem_region *mp, tmp; 3152 paddr_t s, e; 3153 psize_t size; 3154 int i, j; 3155 3156 /* 3157 * Get memory. 3158 */ 3159 mem_regions(&mem, &avail); 3160 #if defined(DEBUG) 3161 if (pmapdebug & PMAPDEBUG_BOOT) { 3162 printf("pmap_bootstrap: memory configuration:\n"); 3163 for (mp = mem; mp->size; mp++) { 3164 printf("pmap_bootstrap: mem start %#" _PRIxpa " size %#" _PRIxpa "\n", 3165 mp->start, mp->size); 3166 } 3167 for (mp = avail; mp->size; mp++) { 3168 printf("pmap_bootstrap: avail start %#" _PRIxpa " size %#" _PRIxpa "\n", 3169 mp->start, mp->size); 3170 } 3171 } 3172 #endif 3173 3174 /* 3175 * Find out how much physical memory we have and in how many chunks. 3176 */ 3177 for (mem_cnt = 0, mp = mem; mp->size; mp++) { 3178 if (mp->start >= pmap_memlimit) 3179 continue; 3180 if (mp->start + mp->size > pmap_memlimit) { 3181 size = pmap_memlimit - mp->start; 3182 physmem += btoc(size); 3183 } else { 3184 physmem += btoc(mp->size); 3185 } 3186 mem_cnt++; 3187 } 3188 3189 /* 3190 * Count the number of available entries. 
3191 */ 3192 for (avail_cnt = 0, mp = avail; mp->size; mp++) 3193 avail_cnt++; 3194 3195 /* 3196 * Page align all regions. 3197 */ 3198 kernelstart = trunc_page(kernelstart); 3199 kernelend = round_page(kernelend); 3200 for (mp = avail, i = 0; i < avail_cnt; i++, mp++) { 3201 s = round_page(mp->start); 3202 mp->size -= (s - mp->start); 3203 mp->size = trunc_page(mp->size); 3204 mp->start = s; 3205 e = mp->start + mp->size; 3206 3207 DPRINTFN(BOOT, 3208 "pmap_bootstrap: b-avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n", 3209 i, mp->start, mp->size); 3210 3211 /* 3212 * Don't allow the end to run beyond our artificial limit 3213 */ 3214 if (e > pmap_memlimit) 3215 e = pmap_memlimit; 3216 3217 /* 3218 * Is this region empty or strange? skip it. 3219 */ 3220 if (e <= s) { 3221 mp->start = 0; 3222 mp->size = 0; 3223 continue; 3224 } 3225 3226 /* 3227 * Does this overlap the beginning of kernel? 3228 * Does extend past the end of the kernel? 3229 */ 3230 else if (s < kernelstart && e > kernelstart) { 3231 if (e > kernelend) { 3232 avail[avail_cnt].start = kernelend; 3233 avail[avail_cnt].size = e - kernelend; 3234 avail_cnt++; 3235 } 3236 mp->size = kernelstart - s; 3237 } 3238 /* 3239 * Check whether this region overlaps the end of the kernel. 3240 */ 3241 else if (s < kernelend && e > kernelend) { 3242 mp->start = kernelend; 3243 mp->size = e - kernelend; 3244 } 3245 /* 3246 * Look whether this regions is completely inside the kernel. 3247 * Nuke it if it does. 3248 */ 3249 else if (s >= kernelstart && e <= kernelend) { 3250 mp->start = 0; 3251 mp->size = 0; 3252 } 3253 /* 3254 * If the user imposed a memory limit, enforce it. 3255 */ 3256 else if (s >= pmap_memlimit) { 3257 mp->start = -PAGE_SIZE; /* let's know why */ 3258 mp->size = 0; 3259 } 3260 else { 3261 mp->start = s; 3262 mp->size = e - s; 3263 } 3264 DPRINTFN(BOOT, 3265 "pmap_bootstrap: a-avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n", 3266 i, mp->start, mp->size); 3267 } 3268 3269 /* 3270 * Move (and uncount) all the null return to the end. 3271 */ 3272 for (mp = avail, i = 0; i < avail_cnt; i++, mp++) { 3273 if (mp->size == 0) { 3274 tmp = avail[i]; 3275 avail[i] = avail[--avail_cnt]; 3276 avail[avail_cnt] = avail[i]; 3277 } 3278 } 3279 3280 /* 3281 * (Bubble)sort them into ascending order. 3282 */ 3283 for (i = 0; i < avail_cnt; i++) { 3284 for (j = i + 1; j < avail_cnt; j++) { 3285 if (avail[i].start > avail[j].start) { 3286 tmp = avail[i]; 3287 avail[i] = avail[j]; 3288 avail[j] = tmp; 3289 } 3290 } 3291 } 3292 3293 /* 3294 * Make sure they don't overlap. 3295 */ 3296 for (mp = avail, i = 0; i < avail_cnt - 1; i++, mp++) { 3297 if (mp[0].start + mp[0].size > mp[1].start) { 3298 mp[0].size = mp[1].start - mp[0].start; 3299 } 3300 DPRINTFN(BOOT, 3301 "pmap_bootstrap: avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n", 3302 i, mp->start, mp->size); 3303 } 3304 DPRINTFN(BOOT, 3305 "pmap_bootstrap: avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n", 3306 i, mp->start, mp->size); 3307 3308 #ifdef PTEGCOUNT 3309 pmap_pteg_cnt = PTEGCOUNT; 3310 #else /* PTEGCOUNT */ 3311 3312 pmap_pteg_cnt = 0x1000; 3313 3314 while (pmap_pteg_cnt < physmem) 3315 pmap_pteg_cnt <<= 1; 3316 3317 pmap_pteg_cnt >>= 1; 3318 #endif /* PTEGCOUNT */ 3319 3320 #ifdef DEBUG 3321 DPRINTFN(BOOT, "pmap_pteg_cnt: 0x%x\n", pmap_pteg_cnt); 3322 #endif 3323 3324 /* 3325 * Find suitably aligned memory for PTEG hash table. 
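	 * The table is allocated aligned to its own size, as the hashed
	 * page table must be size-aligned before its address is written
	 * to SDR1 further below.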
3326 */ 3327 size = pmap_pteg_cnt * sizeof(struct pteg); 3328 pmap_pteg_table = (void *)(uintptr_t) pmap_boot_find_memory(size, size, 0); 3329 3330 #ifdef DEBUG 3331 DPRINTFN(BOOT, 3332 "PTEG cnt: 0x%x HTAB size: 0x%08x bytes, address: %p\n", pmap_pteg_cnt, (unsigned int)size, pmap_pteg_table); 3333 #endif 3334 3335 3336 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 3337 if ( (uintptr_t) pmap_pteg_table + size > SEGMENT_LENGTH) 3338 panic("pmap_bootstrap: pmap_pteg_table end (%p + %#" _PRIxpa ") > 256MB", 3339 pmap_pteg_table, size); 3340 #endif 3341 3342 memset(__UNVOLATILE(pmap_pteg_table), 0, 3343 pmap_pteg_cnt * sizeof(struct pteg)); 3344 pmap_pteg_mask = pmap_pteg_cnt - 1; 3345 3346 /* 3347 * We cannot do pmap_steal_memory here since UVM hasn't been loaded 3348 * with pages. So we just steal them before giving them to UVM. 3349 */ 3350 size = sizeof(pmap_pvo_table[0]) * pmap_pteg_cnt; 3351 pmap_pvo_table = (void *)(uintptr_t) pmap_boot_find_memory(size, PAGE_SIZE, 0); 3352 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 3353 if ( (uintptr_t) pmap_pvo_table + size > SEGMENT_LENGTH) 3354 panic("pmap_bootstrap: pmap_pvo_table end (%p + %#" _PRIxpa ") > 256MB", 3355 pmap_pvo_table, size); 3356 #endif 3357 3358 for (i = 0; i < pmap_pteg_cnt; i++) 3359 TAILQ_INIT(&pmap_pvo_table[i]); 3360 3361 #ifndef MSGBUFADDR 3362 /* 3363 * Allocate msgbuf in high memory. 3364 */ 3365 msgbuf_paddr = pmap_boot_find_memory(MSGBUFSIZE, PAGE_SIZE, 1); 3366 #endif 3367 3368 for (mp = avail, i = 0; i < avail_cnt; mp++, i++) { 3369 paddr_t pfstart = atop(mp->start); 3370 paddr_t pfend = atop(mp->start + mp->size); 3371 if (mp->size == 0) 3372 continue; 3373 if (mp->start + mp->size <= SEGMENT_LENGTH) { 3374 uvm_page_physload(pfstart, pfend, pfstart, pfend, 3375 VM_FREELIST_FIRST256); 3376 } else if (mp->start >= SEGMENT_LENGTH) { 3377 uvm_page_physload(pfstart, pfend, pfstart, pfend, 3378 VM_FREELIST_DEFAULT); 3379 } else { 3380 pfend = atop(SEGMENT_LENGTH); 3381 uvm_page_physload(pfstart, pfend, pfstart, pfend, 3382 VM_FREELIST_FIRST256); 3383 pfstart = atop(SEGMENT_LENGTH); 3384 pfend = atop(mp->start + mp->size); 3385 uvm_page_physload(pfstart, pfend, pfstart, pfend, 3386 VM_FREELIST_DEFAULT); 3387 } 3388 } 3389 3390 /* 3391 * Make sure kernel vsid is allocated as well as VSID 0. 3392 */ 3393 pmap_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS-1)) / VSID_NBPW] 3394 |= 1 << (KERNEL_VSIDBITS % VSID_NBPW); 3395 pmap_vsid_bitmap[(PHYSMAP_VSIDBITS & (NPMAPS-1)) / VSID_NBPW] 3396 |= 1 << (PHYSMAP_VSIDBITS % VSID_NBPW); 3397 pmap_vsid_bitmap[0] |= 1; 3398 3399 /* 3400 * Initialize kernel pmap and hardware. 
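	 * The kernel segment registers are loaded with mtsrin/mtsr, the
	 * hash table address and mask are written to SDR1, and the TLB
	 * is then invalidated with tlbia().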
3401 */ 3402 3403 /* PMAP_OEA64_BRIDGE does support these instructions */ 3404 #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE) 3405 for (i = 0; i < 16; i++) { 3406 #if defined(PPC_OEA601) 3407 /* XXX wedges for segment register 0xf , so set later */ 3408 if ((iosrtable[i] & SR601_T) && ((MFPVR() >> 16) == MPC601)) 3409 continue; 3410 #endif 3411 pmap_kernel()->pm_sr[i] = KERNELN_SEGMENT(i)|SR_PRKEY; 3412 __asm volatile ("mtsrin %0,%1" 3413 :: "r"(KERNELN_SEGMENT(i)|SR_PRKEY), "r"(i << ADDR_SR_SHFT)); 3414 } 3415 3416 pmap_kernel()->pm_sr[KERNEL_SR] = KERNEL_SEGMENT|SR_SUKEY|SR_PRKEY; 3417 __asm volatile ("mtsr %0,%1" 3418 :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT)); 3419 #ifdef KERNEL2_SR 3420 pmap_kernel()->pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT|SR_SUKEY|SR_PRKEY; 3421 __asm volatile ("mtsr %0,%1" 3422 :: "n"(KERNEL2_SR), "r"(KERNEL2_SEGMENT)); 3423 #endif 3424 #endif /* PMAP_OEA || PMAP_OEA64_BRIDGE */ 3425 #if defined (PMAP_OEA) 3426 for (i = 0; i < 16; i++) { 3427 if (iosrtable[i] & SR601_T) { 3428 pmap_kernel()->pm_sr[i] = iosrtable[i]; 3429 __asm volatile ("mtsrin %0,%1" 3430 :: "r"(iosrtable[i]), "r"(i << ADDR_SR_SHFT)); 3431 } 3432 } 3433 __asm volatile ("sync; mtsdr1 %0; isync" 3434 :: "r"((uintptr_t)pmap_pteg_table | (pmap_pteg_mask >> 10))); 3435 #elif defined (PMAP_OEA64) || defined (PMAP_OEA64_BRIDGE) 3436 __asm __volatile ("sync; mtsdr1 %0; isync" 3437 :: "r"((uintptr_t)pmap_pteg_table | (32 - __builtin_clz(pmap_pteg_mask >> 11)))); 3438 #endif 3439 tlbia(); 3440 3441 #ifdef ALTIVEC 3442 pmap_use_altivec = cpu_altivec; 3443 #endif 3444 3445 #ifdef DEBUG 3446 if (pmapdebug & PMAPDEBUG_BOOT) { 3447 u_int cnt; 3448 uvm_physseg_t bank; 3449 char pbuf[9]; 3450 for (cnt = 0, bank = uvm_physseg_get_first(); 3451 uvm_physseg_valid_p(bank); 3452 bank = uvm_physseg_get_next(bank)) { 3453 cnt += uvm_physseg_get_avail_end(bank) - 3454 uvm_physseg_get_avail_start(bank); 3455 printf("pmap_bootstrap: vm_physmem[%d]=%#" _PRIxpa "-%#" _PRIxpa "/%#" _PRIxpa "\n", 3456 bank, 3457 ptoa(uvm_physseg_get_avail_start(bank)), 3458 ptoa(uvm_physseg_get_avail_end(bank)), 3459 ptoa(uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank))); 3460 } 3461 format_bytes(pbuf, sizeof(pbuf), ptoa((u_int64_t) cnt)); 3462 printf("pmap_bootstrap: UVM memory = %s (%u pages)\n", 3463 pbuf, cnt); 3464 } 3465 #endif 3466 3467 pool_init(&pmap_upvo_pool, sizeof(struct pvo_entry), 3468 sizeof(struct pvo_entry), 0, 0, "pmap_upvopl", 3469 &pmap_pool_uallocator, IPL_VM); 3470 3471 pool_setlowat(&pmap_upvo_pool, 252); 3472 3473 pool_init(&pmap_pool, sizeof(struct pmap), 3474 sizeof(void *), 0, 0, "pmap_pl", &pmap_pool_uallocator, 3475 IPL_NONE); 3476 3477 #if defined(PMAP_NEED_MAPKERNEL) 3478 { 3479 struct pmap *pm = pmap_kernel(); 3480 #if defined(PMAP_NEED_FULL_MAPKERNEL) 3481 extern int etext[], kernel_text[]; 3482 vaddr_t va, va_etext = (paddr_t) etext; 3483 #endif 3484 paddr_t pa, pa_end; 3485 register_t sr; 3486 struct pte pt; 3487 unsigned int ptegidx; 3488 int bank; 3489 3490 sr = PHYSMAPN_SEGMENT(0) | SR_SUKEY|SR_PRKEY; 3491 pm->pm_sr[0] = sr; 3492 3493 for (bank = 0; bank < vm_nphysseg; bank++) { 3494 pa_end = ptoa(VM_PHYSMEM_PTR(bank)->avail_end); 3495 pa = ptoa(VM_PHYSMEM_PTR(bank)->avail_start); 3496 for (; pa < pa_end; pa += PAGE_SIZE) { 3497 ptegidx = va_to_pteg(pm, pa); 3498 pmap_pte_create(&pt, pm, pa, pa | PTE_M|PTE_BW); 3499 pmap_pte_insert(ptegidx, &pt); 3500 } 3501 } 3502 3503 #if defined(PMAP_NEED_FULL_MAPKERNEL) 3504 va = (vaddr_t) kernel_text; 3505 3506 for (pa = kernelstart; va < va_etext; 
3507 pa += PAGE_SIZE, va += PAGE_SIZE) { 3508 ptegidx = va_to_pteg(pm, va); 3509 pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BR); 3510 pmap_pte_insert(ptegidx, &pt); 3511 } 3512 3513 for (; pa < kernelend; 3514 pa += PAGE_SIZE, va += PAGE_SIZE) { 3515 ptegidx = va_to_pteg(pm, va); 3516 pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BW); 3517 pmap_pte_insert(ptegidx, &pt); 3518 } 3519 3520 for (va = 0, pa = 0; va < kernelstart; 3521 pa += PAGE_SIZE, va += PAGE_SIZE) { 3522 ptegidx = va_to_pteg(pm, va); 3523 if (va < 0x3000) 3524 pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BR); 3525 else 3526 pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BW); 3527 pmap_pte_insert(ptegidx, &pt); 3528 } 3529 for (va = kernelend, pa = kernelend; va < SEGMENT_LENGTH; 3530 pa += PAGE_SIZE, va += PAGE_SIZE) { 3531 ptegidx = va_to_pteg(pm, va); 3532 pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BW); 3533 pmap_pte_insert(ptegidx, &pt); 3534 } 3535 #endif 3536 3537 __asm volatile ("mtsrin %0,%1" 3538 :: "r"(sr), "r"(kernelstart)); 3539 } 3540 #endif 3541 3542 #if defined(PMAPDEBUG) 3543 if ( pmapdebug ) 3544 pmap_print_mmuregs(); 3545 #endif 3546 } 3547