/*	$NetBSD: pmap.c,v 1.98 2020/07/06 09:34:17 rin Exp $	*/
/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Support for PPC64 Bridge mode added by Sanjay Lal <sanjayl@kymasys.com>
 * of Kyma Systems LLC.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.98 2020/07/06 09:34:17 rin Exp $");

#define	PMAP_NOOPNAMES

#ifdef _KERNEL_OPT
#include "opt_altivec.h"
#include "opt_multiprocessor.h"
#include "opt_pmap.h"
#include "opt_ppcarch.h"
#endif

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/device.h>		/* for evcnt */
#include <sys/systm.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>
#include <uvm/uvm_physseg.h>

#include <machine/powerpc.h>
#include <powerpc/bat.h>
#include <powerpc/pcb.h>
#include <powerpc/psl.h>
#include <powerpc/spr.h>
#include <powerpc/oea/spr.h>
#include <powerpc/oea/sr_601.h>

#ifdef ALTIVEC
extern int pmap_use_altivec;
#endif

#ifdef PMAP_MEMLIMIT
static paddr_t pmap_memlimit = PMAP_MEMLIMIT;
#else
static paddr_t pmap_memlimit = -PAGE_SIZE;	/* there is no limit */
#endif

extern struct pmap kernel_pmap_;
static unsigned int pmap_pages_stolen;
static u_long pmap_pte_valid;
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
static u_long pmap_pvo_enter_depth;
static u_long pmap_pvo_remove_depth;
#endif

#ifndef MSGBUFADDR
extern paddr_t msgbuf_paddr;
#endif

static struct mem_region *mem, *avail;
static u_int mem_cnt, avail_cnt;

#if !defined(PMAP_OEA64) && !defined(PMAP_OEA64_BRIDGE)
# define PMAP_OEA 1
#endif

#if defined(PMAP_OEA)
#define	_PRIxpte	"lx"
#else
#define	_PRIxpte	PRIx64
#endif
#define	_PRIxpa		"lx"
#define	_PRIxva		"lx"
#define	_PRIsr		"lx"

#ifdef PMAP_NEEDS_FIXUP
#if defined(PMAP_OEA)
#define	PMAPNAME(name)	pmap32_##name
#elif defined(PMAP_OEA64)
#define	PMAPNAME(name)	pmap64_##name
#elif defined(PMAP_OEA64_BRIDGE)
#define	PMAPNAME(name)	pmap64bridge_##name
#else
#error unknown variant for pmap
#endif
#endif /* PMAP_NEEDS_FIXUP */

#ifdef PMAPNAME
#define	STATIC			static
#define pmap_pte_spill		PMAPNAME(pte_spill)
#define pmap_real_memory	PMAPNAME(real_memory)
#define pmap_init		PMAPNAME(init)
#define pmap_virtual_space	PMAPNAME(virtual_space)
#define pmap_create		PMAPNAME(create)
#define pmap_reference		PMAPNAME(reference)
#define pmap_destroy		PMAPNAME(destroy)
#define pmap_copy		PMAPNAME(copy)
#define pmap_update		PMAPNAME(update)
#define pmap_enter		PMAPNAME(enter)
#define pmap_remove		PMAPNAME(remove)
#define pmap_kenter_pa		PMAPNAME(kenter_pa)
#define pmap_kremove		PMAPNAME(kremove)
#define pmap_extract		PMAPNAME(extract)
#define pmap_protect		PMAPNAME(protect)
#define pmap_unwire		PMAPNAME(unwire)
#define pmap_page_protect	PMAPNAME(page_protect)
#define pmap_query_bit		PMAPNAME(query_bit)
#define pmap_clear_bit		PMAPNAME(clear_bit)

#define pmap_activate		PMAPNAME(activate)
#define pmap_deactivate		PMAPNAME(deactivate)

#define pmap_pinit		PMAPNAME(pinit)
#define pmap_procwr		PMAPNAME(procwr)

#define pmap_pool		PMAPNAME(pool)
#define pmap_upvo_pool		PMAPNAME(upvo_pool)
#define pmap_mpvo_pool		PMAPNAME(mpvo_pool)
#define pmap_pvo_table		PMAPNAME(pvo_table)
#if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
#define pmap_pte_print		PMAPNAME(pte_print)
#define pmap_pteg_check		PMAPNAME(pteg_check)
#define pmap_print_mmuregs	PMAPNAME(print_mmuregs)
#define pmap_print_pte		PMAPNAME(print_pte)
#define pmap_pteg_dist		PMAPNAME(pteg_dist)
#endif
#if defined(DEBUG) || defined(PMAPCHECK)
#define pmap_pvo_verify		PMAPNAME(pvo_verify)
#define pmapcheck		PMAPNAME(check)
#endif
#if defined(DEBUG) || defined(PMAPDEBUG)
#define pmapdebug		PMAPNAME(debug)
#endif
#define pmap_steal_memory	PMAPNAME(steal_memory)
#define pmap_bootstrap		PMAPNAME(bootstrap)
#else
#define	STATIC			/* nothing */
#endif /* PMAPNAME */

STATIC int pmap_pte_spill(struct pmap *, vaddr_t, bool);
STATIC void pmap_real_memory(paddr_t *, psize_t *);
STATIC void pmap_init(void);
STATIC void pmap_virtual_space(vaddr_t *, vaddr_t *);
STATIC pmap_t pmap_create(void);
STATIC void pmap_reference(pmap_t);
STATIC void pmap_destroy(pmap_t);
STATIC void pmap_copy(pmap_t, pmap_t, vaddr_t, vsize_t, vaddr_t);
STATIC void pmap_update(pmap_t);
STATIC int pmap_enter(pmap_t, vaddr_t, paddr_t, vm_prot_t, u_int);
STATIC void pmap_remove(pmap_t, vaddr_t, vaddr_t);
STATIC void pmap_kenter_pa(vaddr_t, paddr_t, vm_prot_t, u_int);
STATIC void pmap_kremove(vaddr_t, vsize_t);
STATIC bool pmap_extract(pmap_t, vaddr_t, paddr_t *);

STATIC void pmap_protect(pmap_t, vaddr_t, vaddr_t, vm_prot_t);
STATIC void pmap_unwire(pmap_t, vaddr_t);
STATIC void pmap_page_protect(struct vm_page *, vm_prot_t);
STATIC bool pmap_query_bit(struct vm_page *, int);
STATIC bool pmap_clear_bit(struct vm_page *, int);

STATIC void pmap_activate(struct lwp *);
STATIC void pmap_deactivate(struct lwp *);

STATIC void pmap_pinit(pmap_t pm);
STATIC void pmap_procwr(struct proc *, vaddr_t, size_t);

#if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
STATIC void pmap_pte_print(volatile struct pte *);
STATIC void pmap_pteg_check(void);
STATIC void pmap_print_mmuregs(void);
STATIC void pmap_print_pte(pmap_t, vaddr_t);
STATIC void pmap_pteg_dist(void);
#endif
#if defined(DEBUG) || defined(PMAPCHECK)
STATIC void pmap_pvo_verify(void);
#endif
STATIC vaddr_t pmap_steal_memory(vsize_t, vaddr_t *, vaddr_t *);
STATIC void pmap_bootstrap(paddr_t, paddr_t);

#ifdef PMAPNAME
const struct pmap_ops PMAPNAME(ops) = {
	.pmapop_pte_spill = pmap_pte_spill,
	.pmapop_real_memory = pmap_real_memory,
	.pmapop_init = pmap_init,
	.pmapop_virtual_space = pmap_virtual_space,
	.pmapop_create = pmap_create,
	.pmapop_reference = pmap_reference,
	.pmapop_destroy = pmap_destroy,
	.pmapop_copy = pmap_copy,
	.pmapop_update = pmap_update,
	.pmapop_enter = pmap_enter,
	.pmapop_remove = pmap_remove,
	.pmapop_kenter_pa = pmap_kenter_pa,
	.pmapop_kremove = pmap_kremove,
	.pmapop_extract = pmap_extract,
	.pmapop_protect = pmap_protect,
	.pmapop_unwire = pmap_unwire,
	.pmapop_page_protect = pmap_page_protect,
	.pmapop_query_bit = pmap_query_bit,
	.pmapop_clear_bit = pmap_clear_bit,
	.pmapop_activate = pmap_activate,
	.pmapop_deactivate = pmap_deactivate,
	.pmapop_pinit = pmap_pinit,
	.pmapop_procwr = pmap_procwr,
#if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
	.pmapop_pte_print = pmap_pte_print,
	.pmapop_pteg_check = pmap_pteg_check,
	.pmapop_print_mmuregs = pmap_print_mmuregs,
	.pmapop_print_pte = pmap_print_pte,
	.pmapop_pteg_dist = pmap_pteg_dist,
#else
	.pmapop_pte_print = NULL,
	.pmapop_pteg_check = NULL,
	.pmapop_print_mmuregs = NULL,
	.pmapop_print_pte = NULL,
	.pmapop_pteg_dist = NULL,
#endif
#if defined(DEBUG) || defined(PMAPCHECK)
	.pmapop_pvo_verify = pmap_pvo_verify,
#else
	.pmapop_pvo_verify = NULL,
#endif
	.pmapop_steal_memory = pmap_steal_memory,
	.pmapop_bootstrap = pmap_bootstrap,
};
#endif /* !PMAPNAME */

/*
 * The following structure is aligned to 32 bytes
 */
struct pvo_entry {
	LIST_ENTRY(pvo_entry) pvo_vlink;	/* Link to common virt page */
	TAILQ_ENTRY(pvo_entry) pvo_olink;	/* Link to overflow entry */
	struct pte pvo_pte;			/* Prebuilt PTE */
	pmap_t pvo_pmap;			/* ptr to owning pmap */
	vaddr_t pvo_vaddr;			/* VA of entry */
#define	PVO_PTEGIDX_MASK	0x0007		/* which PTEG slot */
#define	PVO_PTEGIDX_VALID	0x0008		/* slot is valid */
#define	PVO_WIRED		0x0010		/* PVO entry is wired */
#define	PVO_MANAGED		0x0020		/* PVO e. for managed page */
#define	PVO_EXECUTABLE		0x0040		/* PVO e. for executable page */
#define	PVO_WIRED_P(pvo)	((pvo)->pvo_vaddr & PVO_WIRED)
#define	PVO_MANAGED_P(pvo)	((pvo)->pvo_vaddr & PVO_MANAGED)
#define	PVO_EXECUTABLE_P(pvo)	((pvo)->pvo_vaddr & PVO_EXECUTABLE)
#define	PVO_ENTER_INSERT	0		/* PVO has been removed */
#define	PVO_SPILL_UNSET		1		/* PVO has been evicted */
#define	PVO_SPILL_SET		2		/* PVO has been spilled */
#define	PVO_SPILL_INSERT	3		/* PVO has been inserted */
#define	PVO_PMAP_PAGE_PROTECT	4		/* PVO has changed */
#define	PVO_PMAP_PROTECT	5		/* PVO has changed */
#define	PVO_REMOVE		6		/* PVO has been removed */
#define	PVO_WHERE_MASK		15
#define	PVO_WHERE_SHFT		8
} __attribute__ ((aligned (32)));
#define	PVO_VADDR(pvo)		((pvo)->pvo_vaddr & ~ADDR_POFF)
#define	PVO_PTEGIDX_GET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
#define	PVO_PTEGIDX_ISSET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
#define	PVO_PTEGIDX_CLR(pvo)	\
	((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
#define	PVO_PTEGIDX_SET(pvo,i)	\
	((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
#define	PVO_WHERE(pvo,w)	\
	((pvo)->pvo_vaddr &= ~(PVO_WHERE_MASK << PVO_WHERE_SHFT), \
	 (pvo)->pvo_vaddr |= ((PVO_ ## w) << PVO_WHERE_SHFT))

TAILQ_HEAD(pvo_tqhead, pvo_entry);
struct pvo_tqhead *pmap_pvo_table;	/* pvo entries by ptegroup index */
static struct pvo_head pmap_pvo_kunmanaged = LIST_HEAD_INITIALIZER(pmap_pvo_kunmanaged);	/* list of unmanaged pages */
static struct pvo_head pmap_pvo_unmanaged = LIST_HEAD_INITIALIZER(pmap_pvo_unmanaged);	/* list of unmanaged pages */

struct pool pmap_pool;		/* pool for pmap structures */
struct pool pmap_upvo_pool;	/* pool for pvo entries for unmanaged pages */
struct pool pmap_mpvo_pool;	/* pool for pvo entries for managed pages */
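
/*
 * Note on the encoding above: the PVO_* flag bits and the PTEG slot index
 * are stored in the low (page-offset) bits of pvo_vaddr.  Mappings are
 * page aligned, so those bits are otherwise unused, and PVO_VADDR() masks
 * them off with ~ADDR_POFF whenever the real virtual address is needed.
 * PVO_WHERE() records, for debugging, the last code path that inserted,
 * spilled, or removed the entry.
 */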

/*
 * We keep a cache of pages to be used for pvo entries for
 * unmanaged pages.
 */
struct pvo_page {
	SIMPLEQ_ENTRY(pvo_page) pvop_link;
};
SIMPLEQ_HEAD(pvop_head, pvo_page);
static struct pvop_head pmap_upvop_head = SIMPLEQ_HEAD_INITIALIZER(pmap_upvop_head);
static struct pvop_head pmap_mpvop_head = SIMPLEQ_HEAD_INITIALIZER(pmap_mpvop_head);
static u_long pmap_upvop_free;
static u_long pmap_upvop_maxfree;
static u_long pmap_mpvop_free;
static u_long pmap_mpvop_maxfree;

static void *pmap_pool_ualloc(struct pool *, int);
static void *pmap_pool_malloc(struct pool *, int);

static void pmap_pool_ufree(struct pool *, void *);
static void pmap_pool_mfree(struct pool *, void *);

static struct pool_allocator pmap_pool_mallocator = {
	.pa_alloc = pmap_pool_malloc,
	.pa_free = pmap_pool_mfree,
	.pa_pagesz = 0,
};

static struct pool_allocator pmap_pool_uallocator = {
	.pa_alloc = pmap_pool_ualloc,
	.pa_free = pmap_pool_ufree,
	.pa_pagesz = 0,
};

#if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
void pmap_pte_print(volatile struct pte *);
void pmap_pteg_check(void);
void pmap_pteg_dist(void);
void pmap_print_pte(pmap_t, vaddr_t);
void pmap_print_mmuregs(void);
#endif

#if defined(DEBUG) || defined(PMAPCHECK)
#ifdef PMAPCHECK
int pmapcheck = 1;
#else
int pmapcheck = 0;
#endif
void pmap_pvo_verify(void);
static void pmap_pvo_check(const struct pvo_entry *);
#define	PMAP_PVO_CHECK(pvo)			\
	do {					\
		if (pmapcheck)			\
			pmap_pvo_check(pvo);	\
	} while (0)
#else
#define	PMAP_PVO_CHECK(pvo)	do { } while (/*CONSTCOND*/0)
#endif
static int pmap_pte_insert(int, struct pte *);
static int pmap_pvo_enter(pmap_t, struct pool *, struct pvo_head *,
	vaddr_t, paddr_t, register_t, int);
static void pmap_pvo_remove(struct pvo_entry *, int, struct pvo_head *);
static void pmap_pvo_free(struct pvo_entry *);
static void pmap_pvo_free_list(struct pvo_head *);
static struct pvo_entry *pmap_pvo_find_va(pmap_t, vaddr_t, int *);
static volatile struct pte *pmap_pvo_to_pte(const struct pvo_entry *, int);
static struct pvo_entry *pmap_pvo_reclaim(struct pmap *);
static void pvo_set_exec(struct pvo_entry *);
static void pvo_clear_exec(struct pvo_entry *);

static void tlbia(void);

static void pmap_release(pmap_t);
static paddr_t pmap_boot_find_memory(psize_t, psize_t, int);

static uint32_t pmap_pvo_reclaim_nextidx;
#ifdef DEBUG
static int pmap_pvo_reclaim_debugctr;
#endif

#define	VSID_NBPW	(sizeof(uint32_t) * 8)
static uint32_t pmap_vsid_bitmap[NPMAPS / VSID_NBPW];

static int pmap_initialized;

#if defined(DEBUG) || defined(PMAPDEBUG)
#define	PMAPDEBUG_BOOT		0x0001
#define	PMAPDEBUG_PTE		0x0002
#define	PMAPDEBUG_EXEC		0x0008
#define	PMAPDEBUG_PVOENTER	0x0010
#define	PMAPDEBUG_PVOREMOVE	0x0020
#define	PMAPDEBUG_ACTIVATE	0x0100
#define	PMAPDEBUG_CREATE	0x0200
#define	PMAPDEBUG_ENTER		0x1000
#define	PMAPDEBUG_KENTER	0x2000
#define	PMAPDEBUG_KREMOVE	0x4000
#define	PMAPDEBUG_REMOVE	0x8000

unsigned int pmapdebug = 0;

# define DPRINTF(x, ...)	printf(x, __VA_ARGS__)
# define DPRINTFN(n, x, ...)	do if (pmapdebug & PMAPDEBUG_ ## n) printf(x, __VA_ARGS__); while (0)
#else
# define DPRINTF(x, ...)	do { } while (0)
# define DPRINTFN(n, x, ...)	do { } while (0)
#endif
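
/*
 * pmapdebug is a bit mask of the PMAPDEBUG_* values above; it can be set
 * at run time (for example from a kernel debugger) to trace the
 * corresponding operations via DPRINTFN().
 */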


#ifdef PMAPCOUNTERS
/*
 * From pmap_subr.c
 */
extern struct evcnt pmap_evcnt_mappings;
extern struct evcnt pmap_evcnt_unmappings;

extern struct evcnt pmap_evcnt_kernel_mappings;
extern struct evcnt pmap_evcnt_kernel_unmappings;

extern struct evcnt pmap_evcnt_mappings_replaced;

extern struct evcnt pmap_evcnt_exec_mappings;
extern struct evcnt pmap_evcnt_exec_cached;

extern struct evcnt pmap_evcnt_exec_synced;
extern struct evcnt pmap_evcnt_exec_synced_clear_modify;
extern struct evcnt pmap_evcnt_exec_synced_pvo_remove;

extern struct evcnt pmap_evcnt_exec_uncached_page_protect;
extern struct evcnt pmap_evcnt_exec_uncached_clear_modify;
extern struct evcnt pmap_evcnt_exec_uncached_zero_page;
extern struct evcnt pmap_evcnt_exec_uncached_copy_page;
extern struct evcnt pmap_evcnt_exec_uncached_pvo_remove;

extern struct evcnt pmap_evcnt_updates;
extern struct evcnt pmap_evcnt_collects;
extern struct evcnt pmap_evcnt_copies;

extern struct evcnt pmap_evcnt_ptes_spilled;
extern struct evcnt pmap_evcnt_ptes_unspilled;
extern struct evcnt pmap_evcnt_ptes_evicted;

extern struct evcnt pmap_evcnt_ptes_primary[8];
extern struct evcnt pmap_evcnt_ptes_secondary[8];
extern struct evcnt pmap_evcnt_ptes_removed;
extern struct evcnt pmap_evcnt_ptes_changed;
extern struct evcnt pmap_evcnt_pvos_reclaimed;
extern struct evcnt pmap_evcnt_pvos_failed;

extern struct evcnt pmap_evcnt_zeroed_pages;
extern struct evcnt pmap_evcnt_copied_pages;
extern struct evcnt pmap_evcnt_idlezeroed_pages;

#define	PMAPCOUNT(ev)	((pmap_evcnt_ ## ev).ev_count++)
#define	PMAPCOUNT2(ev)	((ev).ev_count++)
#else
#define	PMAPCOUNT(ev)	((void) 0)
#define	PMAPCOUNT2(ev)	((void) 0)
#endif

#define	TLBIE(va)	__asm volatile("tlbie %0" :: "r"(va))

/* XXXSL: this needs to be moved to assembler */
#define	TLBIEL(va)	__asm __volatile("tlbie %0" :: "r"(va))

#ifdef MD_TLBSYNC
#define TLBSYNC()	MD_TLBSYNC()
#else
#define	TLBSYNC()	__asm volatile("tlbsync")
#endif
#define	SYNC()		__asm volatile("sync")
#define	EIEIO()		__asm volatile("eieio")
#define	DCBST(va)	__asm __volatile("dcbst 0,%0" :: "r"(va))
#define	MFMSR()		mfmsr()
#define	MTMSR(psl)	mtmsr(psl)
#define	MFPVR()		mfpvr()
#define	MFSRIN(va)	mfsrin(va)
#define	MFTB()		mfrtcltbl()

#if defined(DDB) && !defined(PMAP_OEA64)
static inline register_t
mfsrin(vaddr_t va)
{
	register_t sr;
	__asm volatile ("mfsrin %0,%1" : "=r"(sr) : "r"(va));
	return sr;
}
#endif	/* DDB && !PMAP_OEA64 */

#if defined (PMAP_OEA64_BRIDGE)
extern void mfmsr64 (register64_t *result);
#endif /* PMAP_OEA64_BRIDGE */

#define	PMAP_LOCK()		KERNEL_LOCK(1, NULL)
#define	PMAP_UNLOCK()		KERNEL_UNLOCK_ONE(NULL)

static inline register_t
pmap_interrupts_off(void)
{
	register_t msr = MFMSR();
	if (msr & PSL_EE)
		MTMSR(msr & ~PSL_EE);
	return msr;
}

static void
pmap_interrupts_restore(register_t msr)
{
	if (msr & PSL_EE)
		MTMSR(msr);
}

static inline u_int32_t
mfrtcltbl(void)
{
#ifdef PPC_OEA601
	if ((MFPVR() >> 16) == MPC601)
		return (mfrtcl() >> 7);
	else
#endif
	return (mftbl());
}
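
/*
 * Illustrative sketch (not part of the build): nearly every routine below
 * that touches the page table or the PVO lists brackets the work with this
 * pair, so the hardware never walks a half-updated PTE group.
 */
#if 0
	{
		register_t msr = pmap_interrupts_off();
		/* ... modify pmap_pteg_table and/or the PVO lists ... */
		pmap_interrupts_restore(msr);
	}
#endif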

/*
 * These small routines may have to be replaced,
 * if/when we support processors other than the 604.
 */

void
tlbia(void)
{
	char *i;

	SYNC();
#if defined(PMAP_OEA)
	/*
	 * Why not use "tlbia"?  Because not all processors implement it.
	 *
	 * This needs to be a per-CPU callback to do the appropriate thing
	 * for the CPU. XXX
	 */
	for (i = 0; i < (char *)0x00040000; i += 0x00001000) {
		TLBIE(i);
		EIEIO();
		SYNC();
	}
#elif defined (PMAP_OEA64) || defined (PMAP_OEA64_BRIDGE)
	/* This is specifically for the 970, 970UM v1.6 pp. 140. */
	for (i = 0; i <= (char *)0xFF000; i += 0x00001000) {
		TLBIEL(i);
		EIEIO();
		SYNC();
	}
#endif
	TLBSYNC();
	SYNC();
}

static inline register_t
va_to_vsid(const struct pmap *pm, vaddr_t addr)
{
#if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
	return (pm->pm_sr[addr >> ADDR_SR_SHFT] & SR_VSID) >> SR_VSID_SHFT;
#else /* PMAP_OEA64 */
#if 0
	const struct ste *ste;
	register_t hash;
	int i;

	hash = (addr >> ADDR_ESID_SHFT) & ADDR_ESID_HASH;

	/*
	 * Try the primary group first
	 */
	ste = pm->pm_stes[hash].stes;
	for (i = 0; i < 8; i++, ste++) {
		if ((ste->ste_hi & STE_V) &&
		    (addr & ~(ADDR_POFF|ADDR_PIDX)) == (ste->ste_hi & STE_ESID))
			return ste;
	}

	/*
	 * Then the secondary group.
	 */
	ste = pm->pm_stes[hash ^ ADDR_ESID_HASH].stes;
	for (i = 0; i < 8; i++, ste++) {
		if ((ste->ste_hi & STE_V) &&
		    (addr & ~(ADDR_POFF|ADDR_PIDX)) == (ste->ste_hi & STE_ESID))
			return addr;
	}

	return NULL;
#else
	/*
	 * Rather than searching the STE groups for the VSID, we know
	 * how we generate that from the ESID and so do that.
	 */
	return VSID_MAKE(addr >> ADDR_SR_SHFT, pm->pm_vsid) >> SR_VSID_SHFT;
#endif
#endif /* PMAP_OEA */
}

static inline register_t
va_to_pteg(const struct pmap *pm, vaddr_t addr)
{
	register_t hash;

	hash = va_to_vsid(pm, addr) ^ ((addr & ADDR_PIDX) >> ADDR_PIDX_SHFT);
	return hash & pmap_pteg_mask;
}
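
/*
 * The PTEG index is the low-order bits of (VSID ^ virtual-page-index).
 * The alternate ("secondary") group is simply the complement of those
 * bits, which is why code below flips the index with pmap_pteg_mask and
 * records which hash was used in the PTE's HID bit.  A sketch, with
 * pm/va standing in for any pmap and mapped address:
 */
#if 0	/* illustrative only; not part of the build */
	int ptegidx = va_to_pteg(pm, va);		/* primary group */
	int ptegidx_sec = ptegidx ^ pmap_pteg_mask;	/* secondary group */
#endif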

#if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
/*
 * Given a PTE in the page table, calculate the VADDR that hashes to it.
 * The only bit of magic is that the top 4 bits of the address don't
 * technically exist in the PTE.  But we know we reserved 4 bits of the
 * VSID for it so that's how we get it.
 */
static vaddr_t
pmap_pte_to_va(volatile const struct pte *pt)
{
	vaddr_t va;
	uintptr_t ptaddr = (uintptr_t) pt;

	if (pt->pte_hi & PTE_HID)
		ptaddr ^= (pmap_pteg_mask * sizeof(struct pteg));

	/* PPC Bits 10-19  PPC64 Bits 42-51 */
#if defined(PMAP_OEA)
	va = ((pt->pte_hi >> PTE_VSID_SHFT) ^ (ptaddr / sizeof(struct pteg))) & 0x3ff;
#elif defined (PMAP_OEA64) || defined (PMAP_OEA64_BRIDGE)
	va = ((pt->pte_hi >> PTE_VSID_SHFT) ^ (ptaddr / sizeof(struct pteg))) & 0x7ff;
#endif
	va <<= ADDR_PIDX_SHFT;

	/* PPC Bits 4-9  PPC64 Bits 36-41 */
	va |= (pt->pte_hi & PTE_API) << ADDR_API_SHFT;

#if defined(PMAP_OEA64)
	/* PPC64 Bits 0-35 */
	/* va |= VSID_TO_SR(pt->pte_hi >> PTE_VSID_SHFT) << ADDR_SR_SHFT; */
#elif defined(PMAP_OEA) || defined(PMAP_OEA64_BRIDGE)
	/* PPC Bits 0-3 */
	va |= VSID_TO_SR(pt->pte_hi >> PTE_VSID_SHFT) << ADDR_SR_SHFT;
#endif

	return va;
}
#endif

static inline struct pvo_head *
pa_to_pvoh(paddr_t pa, struct vm_page **pg_p)
{
	struct vm_page *pg;
	struct vm_page_md *md;

	pg = PHYS_TO_VM_PAGE(pa);
	if (pg_p != NULL)
		*pg_p = pg;
	if (pg == NULL)
		return &pmap_pvo_unmanaged;
	md = VM_PAGE_TO_MD(pg);
	return &md->mdpg_pvoh;
}

static inline struct pvo_head *
vm_page_to_pvoh(struct vm_page *pg)
{
	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);

	return &md->mdpg_pvoh;
}


static inline void
pmap_attr_clear(struct vm_page *pg, int ptebit)
{
	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);

	md->mdpg_attrs &= ~ptebit;
}

static inline int
pmap_attr_fetch(struct vm_page *pg)
{
	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);

	return md->mdpg_attrs;
}

static inline void
pmap_attr_save(struct vm_page *pg, int ptebit)
{
	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);

	md->mdpg_attrs |= ptebit;
}

static inline int
pmap_pte_compare(const volatile struct pte *pt, const struct pte *pvo_pt)
{
	if (pt->pte_hi == pvo_pt->pte_hi
#if 0
	    && ((pt->pte_lo ^ pvo_pt->pte_lo) &
	    ~(PTE_REF|PTE_CHG)) == 0
#endif
	    )
		return 1;
	return 0;
}
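
/*
 * The mdpg_attrs word updated by the helpers above is the per-page cache
 * of PTE status: the referenced/changed (PTE_REF/PTE_CHG) bits harvested
 * from hardware PTEs as mappings are torn down, plus PTE_EXEC to record
 * that the page has been synced with the instruction cache.
 */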

static inline void
pmap_pte_create(struct pte *pt, const struct pmap *pm, vaddr_t va, register_t pte_lo)
{
	/*
	 * Construct the PTE.  Default to IMB initially.  Valid bit
	 * only gets set when the real pte is set in memory.
	 *
	 * Note: Don't set the valid bit for correct operation of tlb update.
	 */
#if defined(PMAP_OEA)
	pt->pte_hi = (va_to_vsid(pm, va) << PTE_VSID_SHFT)
	    | (((va & ADDR_PIDX) >> (ADDR_API_SHFT - PTE_API_SHFT)) & PTE_API);
	pt->pte_lo = pte_lo;
#elif defined (PMAP_OEA64_BRIDGE) || defined (PMAP_OEA64)
	pt->pte_hi = ((u_int64_t)va_to_vsid(pm, va) << PTE_VSID_SHFT)
	    | (((va & ADDR_PIDX) >> (ADDR_API_SHFT - PTE_API_SHFT)) & PTE_API);
	pt->pte_lo = (u_int64_t) pte_lo;
#endif /* PMAP_OEA */
}

static inline void
pmap_pte_synch(volatile struct pte *pt, struct pte *pvo_pt)
{
	pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF|PTE_CHG);
}

static inline void
pmap_pte_clear(volatile struct pte *pt, vaddr_t va, int ptebit)
{
	/*
	 * As shown in Section 7.6.3.2.3
	 */
	pt->pte_lo &= ~ptebit;
	TLBIE(va);
	SYNC();
	EIEIO();
	TLBSYNC();
	SYNC();
#ifdef MULTIPROCESSOR
	DCBST(pt);
#endif
}

static inline void
pmap_pte_set(volatile struct pte *pt, struct pte *pvo_pt)
{
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
	if (pvo_pt->pte_hi & PTE_VALID)
		panic("pte_set: setting an already valid pte %p", pvo_pt);
#endif
	pvo_pt->pte_hi |= PTE_VALID;

	/*
	 * Update the PTE as defined in section 7.6.3.1
	 * Note that the REF/CHG bits are from pvo_pt and thus should
	 * have been saved so this routine can restore them (if desired).
	 */
	pt->pte_lo = pvo_pt->pte_lo;
	EIEIO();
	pt->pte_hi = pvo_pt->pte_hi;
	TLBSYNC();
	SYNC();
#ifdef MULTIPROCESSOR
	DCBST(pt);
#endif
	pmap_pte_valid++;
}

static inline void
pmap_pte_unset(volatile struct pte *pt, struct pte *pvo_pt, vaddr_t va)
{
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
	if ((pvo_pt->pte_hi & PTE_VALID) == 0)
		panic("pte_unset: attempt to unset an inactive pte#1 %p/%p", pvo_pt, pt);
	if ((pt->pte_hi & PTE_VALID) == 0)
		panic("pte_unset: attempt to unset an inactive pte#2 %p/%p", pvo_pt, pt);
#endif

	pvo_pt->pte_hi &= ~PTE_VALID;
	/*
	 * Force the ref & chg bits back into the PTEs.
	 */
	SYNC();
	/*
	 * Invalidate the pte ... (Section 7.6.3.3)
	 */
	pt->pte_hi &= ~PTE_VALID;
	SYNC();
	TLBIE(va);
	SYNC();
	EIEIO();
	TLBSYNC();
	SYNC();
	/*
	 * Save the ref & chg bits ...
	 */
	pmap_pte_synch(pt, pvo_pt);
	pmap_pte_valid--;
}

static inline void
pmap_pte_change(volatile struct pte *pt, struct pte *pvo_pt, vaddr_t va)
{
	/*
	 * Invalidate the PTE
	 */
	pmap_pte_unset(pt, pvo_pt, va);
	pmap_pte_set(pt, pvo_pt);
}

/*
 * Try to insert the PTE @ *pvo_pt into the pmap_pteg_table at ptegidx
 * (either primary or secondary location).
 *
 * Note: both the destination and source PTEs must not have PTE_VALID set.
 */

static int
pmap_pte_insert(int ptegidx, struct pte *pvo_pt)
{
	volatile struct pte *pt;
	int i;

#if defined(DEBUG)
	DPRINTFN(PTE, "pmap_pte_insert: idx %#x, pte %#" _PRIxpte " %#" _PRIxpte "\n",
	    ptegidx, pvo_pt->pte_hi, pvo_pt->pte_lo);
#endif
	/*
	 * First try primary hash.
	 */
	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & PTE_VALID) == 0) {
			pvo_pt->pte_hi &= ~PTE_HID;
			pmap_pte_set(pt, pvo_pt);
			return i;
		}
	}

	/*
	 * Now try secondary hash.
	 */
	ptegidx ^= pmap_pteg_mask;
	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & PTE_VALID) == 0) {
			pvo_pt->pte_hi |= PTE_HID;
			pmap_pte_set(pt, pvo_pt);
			return i;
		}
	}
	return -1;
}
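
/*
 * Callers interpret the return value of pmap_pte_insert() as follows: a
 * slot number 0-7 is recorded in the PVO with PVO_PTEGIDX_SET(), while -1
 * means both groups were full and the PVO stays "evicted" until
 * pmap_pte_spill() can make room for it.
 */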

/*
 * Spill handler.
 *
 * Tries to spill a page table entry from the overflow area.
 * This runs in either real mode (if dealing with an exception spill)
 * or virtual mode when dealing with manually spilling one of the
 * kernel's pte entries.  In either case, interrupts are already
 * disabled.
 */

int
pmap_pte_spill(struct pmap *pm, vaddr_t addr, bool exec)
{
	struct pvo_entry *source_pvo, *victim_pvo, *next_pvo;
	struct pvo_entry *pvo;
	/* XXX: gcc -- vpvoh is always set at either *1* or *2* */
	struct pvo_tqhead *pvoh, *vpvoh = NULL;
	int ptegidx, i, j;
	volatile struct pteg *pteg;
	volatile struct pte *pt;

	PMAP_LOCK();

	ptegidx = va_to_pteg(pm, addr);

	/*
	 * Have to substitute some entry.  Use the primary hash for this.
	 * Use low bits of timebase as random generator.  Make sure we are
	 * not picking a kernel pte for replacement.
	 */
	pteg = &pmap_pteg_table[ptegidx];
	i = MFTB() & 7;
	for (j = 0; j < 8; j++) {
		pt = &pteg->pt[i];
		if ((pt->pte_hi & PTE_VALID) == 0)
			break;
		if (VSID_TO_HASH((pt->pte_hi & PTE_VSID) >> PTE_VSID_SHFT)
		    < PHYSMAP_VSIDBITS)
			break;
		i = (i + 1) & 7;
	}
	KASSERT(j < 8);

	source_pvo = NULL;
	victim_pvo = NULL;
	pvoh = &pmap_pvo_table[ptegidx];
	TAILQ_FOREACH(pvo, pvoh, pvo_olink) {

		/*
		 * We need to find the pvo entry for this address...
		 */
		PMAP_PVO_CHECK(pvo);		/* sanity check */

		/*
		 * If we haven't found the source and we come to a PVO with
		 * a valid PTE, then we know we can't find it because all
		 * evicted PVOs always come first in the list.
		 */
		if (source_pvo == NULL && (pvo->pvo_pte.pte_hi & PTE_VALID))
			break;
		if (source_pvo == NULL && pm == pvo->pvo_pmap &&
		    addr == PVO_VADDR(pvo)) {

			/*
			 * Now we have found the entry to be spilled into the
			 * pteg.  Attempt to insert it into the page table.
			 */
			j = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
			if (j >= 0) {
				PVO_PTEGIDX_SET(pvo, j);
				PMAP_PVO_CHECK(pvo);	/* sanity check */
				PVO_WHERE(pvo, SPILL_INSERT);
				pvo->pvo_pmap->pm_evictions--;
				PMAPCOUNT(ptes_spilled);
				PMAPCOUNT2(((pvo->pvo_pte.pte_hi & PTE_HID)
				    ? pmap_evcnt_ptes_secondary
				    : pmap_evcnt_ptes_primary)[j]);

				/*
				 * Since we keep the evicted entries at the
				 * front of the PVO list, we need to move this
				 * (now resident) PVO after the evicted
				 * entries.
				 */
				next_pvo = TAILQ_NEXT(pvo, pvo_olink);

				/*
				 * If we don't have to move (either we were the
				 * last entry or the next entry was valid),
				 * don't change our position.  Otherwise
				 * move ourselves to the tail of the queue.
				 */
				if (next_pvo != NULL &&
				    !(next_pvo->pvo_pte.pte_hi & PTE_VALID)) {
					TAILQ_REMOVE(pvoh, pvo, pvo_olink);
					TAILQ_INSERT_TAIL(pvoh, pvo, pvo_olink);
				}
				PMAP_UNLOCK();
				return 1;
			}
			source_pvo = pvo;
			if (exec && !PVO_EXECUTABLE_P(source_pvo)) {
				PMAP_UNLOCK();
				return 0;
			}
			if (victim_pvo != NULL)
				break;
		}

		/*
		 * We also need the pvo entry of the victim we are replacing
		 * so save the R & C bits of the PTE.
		 */
		if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL &&
		    pmap_pte_compare(pt, &pvo->pvo_pte)) {
			vpvoh = pvoh;			/* *1* */
			victim_pvo = pvo;
			if (source_pvo != NULL)
				break;
		}
	}

	if (source_pvo == NULL) {
		PMAPCOUNT(ptes_unspilled);
		PMAP_UNLOCK();
		return 0;
	}

	if (victim_pvo == NULL) {
		if ((pt->pte_hi & PTE_HID) == 0)
			panic("pmap_pte_spill: victim p-pte (%p) has "
			    "no pvo entry!", pt);

		/*
		 * If this is a secondary PTE, we need to search
		 * its primary pvo bucket for the matching PVO.
		 */
		vpvoh = &pmap_pvo_table[ptegidx ^ pmap_pteg_mask];	/* *2* */
		TAILQ_FOREACH(pvo, vpvoh, pvo_olink) {
			PMAP_PVO_CHECK(pvo);		/* sanity check */

			/*
			 * We also need the pvo entry of the victim we are
			 * replacing so save the R & C bits of the PTE.
			 */
			if (pmap_pte_compare(pt, &pvo->pvo_pte)) {
				victim_pvo = pvo;
				break;
			}
		}
		if (victim_pvo == NULL)
			panic("pmap_pte_spill: victim s-pte (%p) has "
			    "no pvo entry!", pt);
	}

	/*
	 * The victim should not be a kernel PVO/PTE entry.
	 */
	KASSERT(victim_pvo->pvo_pmap != pmap_kernel());
	KASSERT(PVO_PTEGIDX_ISSET(victim_pvo));
	KASSERT(PVO_PTEGIDX_GET(victim_pvo) == i);

	/*
	 * We are invalidating the TLB entry for the EA we are replacing
	 * even though it's valid; if we don't, we lose any ref/chg bit
	 * changes contained in the TLB entry.
	 */
	source_pvo->pvo_pte.pte_hi &= ~PTE_HID;

	/*
	 * To enforce the PVO list ordering constraint that all
	 * evicted entries should come before all valid entries,
	 * move the source PVO to the tail of its list and the
	 * victim PVO to the head of its list (which might not be
	 * the same list, if the victim was using the secondary hash).
	 */
	TAILQ_REMOVE(pvoh, source_pvo, pvo_olink);
	TAILQ_INSERT_TAIL(pvoh, source_pvo, pvo_olink);
	TAILQ_REMOVE(vpvoh, victim_pvo, pvo_olink);
	TAILQ_INSERT_HEAD(vpvoh, victim_pvo, pvo_olink);
	pmap_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr);
	pmap_pte_set(pt, &source_pvo->pvo_pte);
	victim_pvo->pvo_pmap->pm_evictions++;
	source_pvo->pvo_pmap->pm_evictions--;
	PVO_WHERE(victim_pvo, SPILL_UNSET);
	PVO_WHERE(source_pvo, SPILL_SET);

	PVO_PTEGIDX_CLR(victim_pvo);
	PVO_PTEGIDX_SET(source_pvo, i);
	PMAPCOUNT2(pmap_evcnt_ptes_primary[i]);
	PMAPCOUNT(ptes_spilled);
	PMAPCOUNT(ptes_evicted);
	PMAPCOUNT(ptes_removed);

	PMAP_PVO_CHECK(victim_pvo);
	PMAP_PVO_CHECK(source_pvo);

	PMAP_UNLOCK();
	return 1;
}
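
/*
 * The ordering invariant relied on above: within each pmap_pvo_table
 * bucket, evicted PVOs (those without a resident PTE) are kept ahead of
 * resident ones.  That lets the spill loop stop at the first valid entry
 * when looking for the faulting mapping, and both pmap_pte_spill() and
 * pmap_pvo_enter() re-establish the invariant whenever they insert or
 * evict an entry.
 */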

/*
 * Restrict given range to physical memory
 */
void
pmap_real_memory(paddr_t *start, psize_t *size)
{
	struct mem_region *mp;

	for (mp = mem; mp->size; mp++) {
		if (*start + *size > mp->start
		    && *start < mp->start + mp->size) {
			if (*start < mp->start) {
				*size -= mp->start - *start;
				*start = mp->start;
			}
			if (*start + *size > mp->start + mp->size)
				*size = mp->start + mp->size - *start;
			return;
		}
	}
	*size = 0;
}
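
/*
 * On return the range [*start, *start + *size) has been clipped to the
 * first mem_region it overlaps; a resulting *size of 0 means the range
 * does not intersect physical memory at all.
 */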

/*
 * Initialize anything else for pmap handling.
 * Called during vm_init().
 */
void
pmap_init(void)
{
	pool_init(&pmap_mpvo_pool, sizeof(struct pvo_entry),
	    sizeof(struct pvo_entry), 0, 0, "pmap_mpvopl",
	    &pmap_pool_mallocator, IPL_NONE);

	pool_setlowat(&pmap_mpvo_pool, 1008);

	pmap_initialized = 1;

}

/*
 * How much virtual space does the kernel get?
 */
void
pmap_virtual_space(vaddr_t *start, vaddr_t *end)
{
	/*
	 * For now, reserve one segment (minus some overhead) for kernel
	 * virtual memory
	 */
	*start = VM_MIN_KERNEL_ADDRESS;
	*end = VM_MAX_KERNEL_ADDRESS;
}

/*
 * Allocate, initialize, and return a new physical map.
 */
pmap_t
pmap_create(void)
{
	pmap_t pm;

	pm = pool_get(&pmap_pool, PR_WAITOK);
	KASSERT((vaddr_t)pm < VM_MIN_KERNEL_ADDRESS);
	memset((void *)pm, 0, sizeof *pm);
	pmap_pinit(pm);

	DPRINTFN(CREATE, "pmap_create: pm %p:\n"
	    "\t%#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr
	    " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr "\n"
	    "\t%#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr
	    " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr "\n",
	    pm,
	    pm->pm_sr[0], pm->pm_sr[1],
	    pm->pm_sr[2], pm->pm_sr[3],
	    pm->pm_sr[4], pm->pm_sr[5],
	    pm->pm_sr[6], pm->pm_sr[7],
	    pm->pm_sr[8], pm->pm_sr[9],
	    pm->pm_sr[10], pm->pm_sr[11],
	    pm->pm_sr[12], pm->pm_sr[13],
	    pm->pm_sr[14], pm->pm_sr[15]);
	return pm;
}

/*
 * Initialize a preallocated and zeroed pmap structure.
 */
void
pmap_pinit(pmap_t pm)
{
	register_t entropy = MFTB();
	register_t mask;
	int i;

	/*
	 * Allocate some segment registers for this pmap.
	 */
	pm->pm_refs = 1;
	PMAP_LOCK();
	for (i = 0; i < NPMAPS; i += VSID_NBPW) {
		static register_t pmap_vsidcontext;
		register_t hash;
		unsigned int n;

		/* Create a new value by multiplying by a prime and adding in
		 * entropy from the timebase register.  This is to make the
		 * VSID more random so that the PT Hash function collides
		 * less often.  (note that the prime causes gcc to do shifts
		 * instead of a multiply)
		 */
		pmap_vsidcontext = (pmap_vsidcontext * 0x1105) + entropy;
		hash = pmap_vsidcontext & (NPMAPS - 1);
		if (hash == 0) {		/* 0 is special, avoid it */
			entropy += 0xbadf00d;
			continue;
		}
		n = hash >> 5;
		mask = 1L << (hash & (VSID_NBPW-1));
		hash = pmap_vsidcontext;
		if (pmap_vsid_bitmap[n] & mask) {	/* collision? */
			/* anything free in this bucket? */
			if (~pmap_vsid_bitmap[n] == 0) {
				entropy = hash ^ (hash >> 16);
				continue;
			}
			i = ffs(~pmap_vsid_bitmap[n]) - 1;
			mask = 1L << i;
			hash &= ~(VSID_NBPW-1);
			hash |= i;
		}
		hash &= PTE_VSID >> PTE_VSID_SHFT;
		pmap_vsid_bitmap[n] |= mask;
		pm->pm_vsid = hash;
#if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
		for (i = 0; i < 16; i++)
			pm->pm_sr[i] = VSID_MAKE(i, hash) | SR_PRKEY |
			    SR_NOEXEC;
#endif
		PMAP_UNLOCK();
		return;
	}
	PMAP_UNLOCK();
	panic("pmap_pinit: out of segments");
}
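
/*
 * Each pmap thus owns one of NPMAPS base VSIDs, tracked in
 * pmap_vsid_bitmap (one bit per VSID, VSID_NBPW bits per word).  On
 * 32-bit OEA and the 64-bit bridge, the 16 segment registers are then
 * derived from that base VSID with VSID_MAKE(), with user-key protection
 * and no-execute set by default until an executable mapping appears.
 */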

/*
 * Add a reference to the given pmap.
 */
void
pmap_reference(pmap_t pm)
{
	atomic_inc_uint(&pm->pm_refs);
}

/*
 * Retire the given pmap from service.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_destroy(pmap_t pm)
{
	if (atomic_dec_uint_nv(&pm->pm_refs) == 0) {
		pmap_release(pm);
		pool_put(&pmap_pool, pm);
	}
}

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 */
void
pmap_release(pmap_t pm)
{
	int idx, mask;

	KASSERT(pm->pm_stats.resident_count == 0);
	KASSERT(pm->pm_stats.wired_count == 0);

	PMAP_LOCK();
	if (pm->pm_sr[0] == 0)
		panic("pmap_release");
	idx = pm->pm_vsid & (NPMAPS-1);
	mask = 1 << (idx % VSID_NBPW);
	idx /= VSID_NBPW;

	KASSERT(pmap_vsid_bitmap[idx] & mask);
	pmap_vsid_bitmap[idx] &= ~mask;
	PMAP_UNLOCK();
}

/*
 * Copy the range specified by src_addr/len
 * from the source map to the range dst_addr/len
 * in the destination map.
 *
 * This routine is only advisory and need not do anything.
 */
void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr,
	vsize_t len, vaddr_t src_addr)
{
	PMAPCOUNT(copies);
}

/*
 * Require that all active physical maps contain no
 * incorrect entries NOW.
 */
void
pmap_update(struct pmap *pmap)
{
	PMAPCOUNT(updates);
	TLBSYNC();
}

static inline int
pmap_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
{
	int pteidx;
	/*
	 * We can find the actual pte entry without searching by
	 * grabbing the PTEG index from 3 unused bits in pte_lo[11:9]
	 * and by noticing the HID bit.
	 */
	pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo);
	if (pvo->pvo_pte.pte_hi & PTE_HID)
		pteidx ^= pmap_pteg_mask * 8;
	return pteidx;
}
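
/*
 * The resulting pteidx is a flat index into the 8-entry PTE groups:
 * pmap_pvo_to_pte() and pmap_pvo_remove() recover the group with
 * pteidx >> 3 and the slot within it with pteidx & 7, flipping back to
 * the primary group when the PTE was installed under the secondary hash.
 */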

volatile struct pte *
pmap_pvo_to_pte(const struct pvo_entry *pvo, int pteidx)
{
	volatile struct pte *pt;

#if !defined(DIAGNOSTIC) && !defined(DEBUG) && !defined(PMAPCHECK)
	if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0)
		return NULL;
#endif

	/*
	 * If we haven't been supplied the ptegidx, calculate it.
	 */
	if (pteidx == -1) {
		int ptegidx;
		ptegidx = va_to_pteg(pvo->pvo_pmap, pvo->pvo_vaddr);
		pteidx = pmap_pvo_pte_index(pvo, ptegidx);
	}

	pt = &pmap_pteg_table[pteidx >> 3].pt[pteidx & 7];

#if !defined(DIAGNOSTIC) && !defined(DEBUG) && !defined(PMAPCHECK)
	return pt;
#else
	if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) {
		panic("pmap_pvo_to_pte: pvo %p: has valid pte in "
		    "pvo but no valid pte index", pvo);
	}
	if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) {
		panic("pmap_pvo_to_pte: pvo %p: has valid pte index in "
		    "pvo but no valid pte", pvo);
	}

	if ((pt->pte_hi ^ (pvo->pvo_pte.pte_hi & ~PTE_VALID)) == PTE_VALID) {
		if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) {
#if defined(DEBUG) || defined(PMAPCHECK)
			pmap_pte_print(pt);
#endif
			panic("pmap_pvo_to_pte: pvo %p: has valid pte in "
			    "pmap_pteg_table %p but invalid in pvo",
			    pvo, pt);
		}
		if (((pt->pte_lo ^ pvo->pvo_pte.pte_lo) & ~(PTE_CHG|PTE_REF)) != 0) {
#if defined(DEBUG) || defined(PMAPCHECK)
			pmap_pte_print(pt);
#endif
			panic("pmap_pvo_to_pte: pvo %p: pvo pte does "
			    "not match pte %p in pmap_pteg_table",
			    pvo, pt);
		}
		return pt;
	}

	if (pvo->pvo_pte.pte_hi & PTE_VALID) {
#if defined(DEBUG) || defined(PMAPCHECK)
		pmap_pte_print(pt);
#endif
		panic("pmap_pvo_to_pte: pvo %p: has no matching pte %p in "
		    "pmap_pteg_table but valid in pvo", pvo, pt);
	}
	return NULL;
#endif	/* !(!DIAGNOSTIC && !DEBUG && !PMAPCHECK) */
}
"kernel" : "user"), va); 1416 return NULL; 1417 } 1418 1419 #if defined(DEBUG) || defined(PMAPCHECK) 1420 void 1421 pmap_pvo_check(const struct pvo_entry *pvo) 1422 { 1423 struct pvo_head *pvo_head; 1424 struct pvo_entry *pvo0; 1425 volatile struct pte *pt; 1426 int failed = 0; 1427 1428 PMAP_LOCK(); 1429 1430 if ((uintptr_t)(pvo+1) >= SEGMENT_LENGTH) 1431 panic("pmap_pvo_check: pvo %p: invalid address", pvo); 1432 1433 if ((uintptr_t)(pvo->pvo_pmap+1) >= SEGMENT_LENGTH) { 1434 printf("pmap_pvo_check: pvo %p: invalid pmap address %p\n", 1435 pvo, pvo->pvo_pmap); 1436 failed = 1; 1437 } 1438 1439 if ((uintptr_t)TAILQ_NEXT(pvo, pvo_olink) >= SEGMENT_LENGTH || 1440 (((uintptr_t)TAILQ_NEXT(pvo, pvo_olink)) & 0x1f) != 0) { 1441 printf("pmap_pvo_check: pvo %p: invalid ovlink address %p\n", 1442 pvo, TAILQ_NEXT(pvo, pvo_olink)); 1443 failed = 1; 1444 } 1445 1446 if ((uintptr_t)LIST_NEXT(pvo, pvo_vlink) >= SEGMENT_LENGTH || 1447 (((uintptr_t)LIST_NEXT(pvo, pvo_vlink)) & 0x1f) != 0) { 1448 printf("pmap_pvo_check: pvo %p: invalid ovlink address %p\n", 1449 pvo, LIST_NEXT(pvo, pvo_vlink)); 1450 failed = 1; 1451 } 1452 1453 if (PVO_MANAGED_P(pvo)) { 1454 pvo_head = pa_to_pvoh(pvo->pvo_pte.pte_lo & PTE_RPGN, NULL); 1455 } else { 1456 if (pvo->pvo_vaddr < VM_MIN_KERNEL_ADDRESS) { 1457 printf("pmap_pvo_check: pvo %p: non kernel address " 1458 "on kernel unmanaged list\n", pvo); 1459 failed = 1; 1460 } 1461 pvo_head = &pmap_pvo_kunmanaged; 1462 } 1463 LIST_FOREACH(pvo0, pvo_head, pvo_vlink) { 1464 if (pvo0 == pvo) 1465 break; 1466 } 1467 if (pvo0 == NULL) { 1468 printf("pmap_pvo_check: pvo %p: not present " 1469 "on its vlist head %p\n", pvo, pvo_head); 1470 failed = 1; 1471 } 1472 if (pvo != pmap_pvo_find_va(pvo->pvo_pmap, pvo->pvo_vaddr, NULL)) { 1473 printf("pmap_pvo_check: pvo %p: not present " 1474 "on its olist head\n", pvo); 1475 failed = 1; 1476 } 1477 pt = pmap_pvo_to_pte(pvo, -1); 1478 if (pt == NULL) { 1479 if (pvo->pvo_pte.pte_hi & PTE_VALID) { 1480 printf("pmap_pvo_check: pvo %p: pte_hi VALID but " 1481 "no PTE\n", pvo); 1482 failed = 1; 1483 } 1484 } else { 1485 if ((uintptr_t) pt < (uintptr_t) &pmap_pteg_table[0] || 1486 (uintptr_t) pt >= 1487 (uintptr_t) &pmap_pteg_table[pmap_pteg_cnt]) { 1488 printf("pmap_pvo_check: pvo %p: pte %p not in " 1489 "pteg table\n", pvo, pt); 1490 failed = 1; 1491 } 1492 if (((((uintptr_t) pt) >> 3) & 7) != PVO_PTEGIDX_GET(pvo)) { 1493 printf("pmap_pvo_check: pvo %p: pte_hi VALID but " 1494 "no PTE\n", pvo); 1495 failed = 1; 1496 } 1497 if (pvo->pvo_pte.pte_hi != pt->pte_hi) { 1498 printf("pmap_pvo_check: pvo %p: pte_hi differ: " 1499 "%#" _PRIxpte "/%#" _PRIxpte "\n", pvo, 1500 pvo->pvo_pte.pte_hi, 1501 pt->pte_hi); 1502 failed = 1; 1503 } 1504 if (((pvo->pvo_pte.pte_lo ^ pt->pte_lo) & 1505 (PTE_PP|PTE_WIMG|PTE_RPGN)) != 0) { 1506 printf("pmap_pvo_check: pvo %p: pte_lo differ: " 1507 "%#" _PRIxpte "/%#" _PRIxpte "\n", pvo, 1508 (pvo->pvo_pte.pte_lo & (PTE_PP|PTE_WIMG|PTE_RPGN)), 1509 (pt->pte_lo & (PTE_PP|PTE_WIMG|PTE_RPGN))); 1510 failed = 1; 1511 } 1512 if ((pmap_pte_to_va(pt) ^ PVO_VADDR(pvo)) & 0x0fffffff) { 1513 printf("pmap_pvo_check: pvo %p: PTE %p derived VA %#" _PRIxva "" 1514 " doesn't not match PVO's VA %#" _PRIxva "\n", 1515 pvo, pt, pmap_pte_to_va(pt), PVO_VADDR(pvo)); 1516 failed = 1; 1517 } 1518 if (failed) 1519 pmap_pte_print(pt); 1520 } 1521 if (failed) 1522 panic("pmap_pvo_check: pvo %p, pm %p: bugcheck!", pvo, 1523 pvo->pvo_pmap); 1524 1525 PMAP_UNLOCK(); 1526 } 1527 #endif /* DEBUG || PMAPCHECK */ 1528 1529 /* 1530 * Search the PVO table 

/*
 * Search the PVO table looking for a non-wired entry.
 * If we find one, remove it and return it.
 */

struct pvo_entry *
pmap_pvo_reclaim(struct pmap *pm)
{
	struct pvo_tqhead *pvoh;
	struct pvo_entry *pvo;
	uint32_t idx, endidx;

	endidx = pmap_pvo_reclaim_nextidx;
	for (idx = (endidx + 1) & pmap_pteg_mask; idx != endidx;
	     idx = (idx + 1) & pmap_pteg_mask) {
		pvoh = &pmap_pvo_table[idx];
		TAILQ_FOREACH(pvo, pvoh, pvo_olink) {
			if (!PVO_WIRED_P(pvo)) {
				pmap_pvo_remove(pvo, -1, NULL);
				pmap_pvo_reclaim_nextidx = idx;
				PMAPCOUNT(pvos_reclaimed);
				return pvo;
			}
		}
	}
	return NULL;
}

static struct pool *
pmap_pvo_pl(struct pvo_entry *pvo)
{

	return PVO_MANAGED_P(pvo) ? &pmap_mpvo_pool : &pmap_upvo_pool;
}
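
/*
 * The reclaim scan is round-robin: it resumes one bucket past the point
 * where the previous reclaim succeeded, so the cost of finding an
 * unwired victim is spread across the whole table.  The unmapped PVO is
 * handed back to the caller (pmap_pvo_enter()) to be reused in place of
 * a fresh pool allocation.
 */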

/*
 * This returns whether this is the first mapping of a page.
 */
int
pmap_pvo_enter(pmap_t pm, struct pool *pl, struct pvo_head *pvo_head,
	vaddr_t va, paddr_t pa, register_t pte_lo, int flags)
{
	struct pvo_entry *pvo;
	struct pvo_tqhead *pvoh;
	register_t msr;
	int ptegidx;
	int i;
	int poolflags = PR_NOWAIT;

	/*
	 * Compute the PTE Group index.
	 */
	va &= ~ADDR_POFF;
	ptegidx = va_to_pteg(pm, va);

	msr = pmap_interrupts_off();

#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
	if (pmap_pvo_remove_depth > 0)
		panic("pmap_pvo_enter: called while pmap_pvo_remove active!");
	if (++pmap_pvo_enter_depth > 1)
		panic("pmap_pvo_enter: called recursively!");
#endif

	/*
	 * Remove any existing mapping for this page.  Reuse the
	 * pvo entry if there is a mapping.
	 */
	TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
#ifdef DEBUG
			if ((pmapdebug & PMAPDEBUG_PVOENTER) &&
			    ((pvo->pvo_pte.pte_lo ^ (pa|pte_lo)) &
			    ~(PTE_REF|PTE_CHG)) == 0 &&
			    va < VM_MIN_KERNEL_ADDRESS) {
				printf("pmap_pvo_enter: pvo %p: dup %#" _PRIxpte "/%#" _PRIxpa "\n",
				    pvo, pvo->pvo_pte.pte_lo, pte_lo|pa);
				printf("pmap_pvo_enter: pte_hi=%#" _PRIxpte " sr=%#" _PRIsr "\n",
				    pvo->pvo_pte.pte_hi,
				    pm->pm_sr[va >> ADDR_SR_SHFT]);
				pmap_pte_print(pmap_pvo_to_pte(pvo, -1));
#ifdef DDBX
				Debugger();
#endif
			}
#endif
			PMAPCOUNT(mappings_replaced);
			pmap_pvo_remove(pvo, -1, NULL);
			break;
		}
	}

	/*
	 * If we aren't overwriting a mapping, try to allocate.
	 */
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
	--pmap_pvo_enter_depth;
#endif
	pmap_interrupts_restore(msr);
	if (pvo) {
		KASSERT(pmap_pvo_pl(pvo) == pl);
	} else {
		pvo = pool_get(pl, poolflags);
	}
	KASSERT((vaddr_t)pvo < VM_MIN_KERNEL_ADDRESS);

#ifdef DEBUG
	/*
	 * Exercise pmap_pvo_reclaim() a little.
	 */
	if (pvo && (flags & PMAP_CANFAIL) != 0 &&
	    pmap_pvo_reclaim_debugctr++ > 0x1000 &&
	    (pmap_pvo_reclaim_debugctr & 0xff) == 0) {
		pool_put(pl, pvo);
		pvo = NULL;
	}
#endif

	msr = pmap_interrupts_off();
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
	++pmap_pvo_enter_depth;
#endif
	if (pvo == NULL) {
		pvo = pmap_pvo_reclaim(pm);
		if (pvo == NULL) {
			if ((flags & PMAP_CANFAIL) == 0)
				panic("pmap_pvo_enter: failed");
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
			pmap_pvo_enter_depth--;
#endif
			PMAPCOUNT(pvos_failed);
			pmap_interrupts_restore(msr);
			return ENOMEM;
		}
	}

	pvo->pvo_vaddr = va;
	pvo->pvo_pmap = pm;
	pvo->pvo_vaddr &= ~ADDR_POFF;
	if (flags & VM_PROT_EXECUTE) {
		PMAPCOUNT(exec_mappings);
		pvo_set_exec(pvo);
	}
	if (flags & PMAP_WIRED)
		pvo->pvo_vaddr |= PVO_WIRED;
	if (pvo_head != &pmap_pvo_kunmanaged) {
		pvo->pvo_vaddr |= PVO_MANAGED;
		PMAPCOUNT(mappings);
	} else {
		PMAPCOUNT(kernel_mappings);
	}
	pmap_pte_create(&pvo->pvo_pte, pm, va, pa | pte_lo);

	LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
	if (PVO_WIRED_P(pvo))
		pvo->pvo_pmap->pm_stats.wired_count++;
	pvo->pvo_pmap->pm_stats.resident_count++;
#if defined(DEBUG)
/*	if (pm != pmap_kernel() && va < VM_MIN_KERNEL_ADDRESS) */
	DPRINTFN(PVOENTER,
	    "pmap_pvo_enter: pvo %p: pm %p va %#" _PRIxva " pa %#" _PRIxpa "\n",
	    pvo, pm, va, pa);
#endif

	/*
	 * We hope this succeeds but it isn't required.
	 */
	pvoh = &pmap_pvo_table[ptegidx];
	i = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
	if (i >= 0) {
		PVO_PTEGIDX_SET(pvo, i);
		PVO_WHERE(pvo, ENTER_INSERT);
		PMAPCOUNT2(((pvo->pvo_pte.pte_hi & PTE_HID)
		    ? pmap_evcnt_ptes_secondary : pmap_evcnt_ptes_primary)[i]);
		TAILQ_INSERT_TAIL(pvoh, pvo, pvo_olink);

	} else {
		/*
		 * Since we didn't have room for this entry (which makes it
		 * an evicted entry), place it at the head of the list.
		 */
		TAILQ_INSERT_HEAD(pvoh, pvo, pvo_olink);
		PMAPCOUNT(ptes_evicted);
		pm->pm_evictions++;
		/*
		 * If this is a kernel page, make sure it's active.
		 */
		if (pm == pmap_kernel()) {
			i = pmap_pte_spill(pm, va, false);
			KASSERT(i);
		}
	}
	PMAP_PVO_CHECK(pvo);		/* sanity check */
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
	pmap_pvo_enter_depth--;
#endif
	pmap_interrupts_restore(msr);
	return 0;
}
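
/*
 * Summary of the return contract above: 0 on success, and ENOMEM only
 * when the caller passed PMAP_CANFAIL and no PVO could be allocated or
 * reclaimed; without PMAP_CANFAIL an allocation failure panics instead.
 */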

static void
pmap_pvo_remove(struct pvo_entry *pvo, int pteidx, struct pvo_head *pvol)
{
	volatile struct pte *pt;
	int ptegidx;

#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
	if (++pmap_pvo_remove_depth > 1)
		panic("pmap_pvo_remove: called recursively!");
#endif

	/*
	 * If we haven't been supplied the ptegidx, calculate it.
	 */
	if (pteidx == -1) {
		ptegidx = va_to_pteg(pvo->pvo_pmap, pvo->pvo_vaddr);
		pteidx = pmap_pvo_pte_index(pvo, ptegidx);
	} else {
		ptegidx = pteidx >> 3;
		if (pvo->pvo_pte.pte_hi & PTE_HID)
			ptegidx ^= pmap_pteg_mask;
	}
	PMAP_PVO_CHECK(pvo);		/* sanity check */

	/*
	 * If there is an active pte entry, we need to deactivate it
	 * (and save the ref & chg bits).
	 */
	pt = pmap_pvo_to_pte(pvo, pteidx);
	if (pt != NULL) {
		pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
		PVO_WHERE(pvo, REMOVE);
		PVO_PTEGIDX_CLR(pvo);
		PMAPCOUNT(ptes_removed);
	} else {
		KASSERT(pvo->pvo_pmap->pm_evictions > 0);
		pvo->pvo_pmap->pm_evictions--;
	}

	/*
	 * Account for executable mappings.
	 */
	if (PVO_EXECUTABLE_P(pvo))
		pvo_clear_exec(pvo);

	/*
	 * Update our statistics.
	 */
	pvo->pvo_pmap->pm_stats.resident_count--;
	if (PVO_WIRED_P(pvo))
		pvo->pvo_pmap->pm_stats.wired_count--;

	/*
	 * Save the REF/CHG bits into their cache if the page is managed.
	 */
	if (PVO_MANAGED_P(pvo)) {
		register_t ptelo = pvo->pvo_pte.pte_lo;
		struct vm_page *pg = PHYS_TO_VM_PAGE(ptelo & PTE_RPGN);

		if (pg != NULL) {
			/*
			 * If this page was changed and it is mapped exec,
			 * invalidate it.
			 */
			if ((ptelo & PTE_CHG) &&
			    (pmap_attr_fetch(pg) & PTE_EXEC)) {
				struct pvo_head *pvoh = vm_page_to_pvoh(pg);
				if (LIST_EMPTY(pvoh)) {
					DPRINTFN(EXEC, "[pmap_pvo_remove: "
					    "%#" _PRIxpa ": clear-exec]\n",
					    VM_PAGE_TO_PHYS(pg));
					pmap_attr_clear(pg, PTE_EXEC);
					PMAPCOUNT(exec_uncached_pvo_remove);
				} else {
					DPRINTFN(EXEC, "[pmap_pvo_remove: "
					    "%#" _PRIxpa ": syncicache]\n",
					    VM_PAGE_TO_PHYS(pg));
					pmap_syncicache(VM_PAGE_TO_PHYS(pg),
					    PAGE_SIZE);
					PMAPCOUNT(exec_synced_pvo_remove);
				}
			}

			pmap_attr_save(pg, ptelo & (PTE_REF|PTE_CHG));
		}
		PMAPCOUNT(unmappings);
	} else {
		PMAPCOUNT(kernel_unmappings);
	}

	/*
	 * Remove the PVO from its lists and return it to the pool.
	 */
	LIST_REMOVE(pvo, pvo_vlink);
	TAILQ_REMOVE(&pmap_pvo_table[ptegidx], pvo, pvo_olink);
	if (pvol) {
		LIST_INSERT_HEAD(pvol, pvo, pvo_vlink);
	}
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
	pmap_pvo_remove_depth--;
#endif
}

void
pmap_pvo_free(struct pvo_entry *pvo)
{

	pool_put(pmap_pvo_pl(pvo), pvo);
}

void
pmap_pvo_free_list(struct pvo_head *pvol)
{
	struct pvo_entry *pvo, *npvo;

	for (pvo = LIST_FIRST(pvol); pvo != NULL; pvo = npvo) {
		npvo = LIST_NEXT(pvo, pvo_vlink);
		LIST_REMOVE(pvo, pvo_vlink);
		pmap_pvo_free(pvo);
	}
}

/*
 * Mark a mapping as executable.
 * If this is the first executable mapping in the segment,
 * clear the noexec flag.
 */
static void
pvo_set_exec(struct pvo_entry *pvo)
{
	struct pmap *pm = pvo->pvo_pmap;

	if (pm == pmap_kernel() || PVO_EXECUTABLE_P(pvo)) {
		return;
	}
	pvo->pvo_vaddr |= PVO_EXECUTABLE;
#if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
	{
		int sr = PVO_VADDR(pvo) >> ADDR_SR_SHFT;
		if (pm->pm_exec[sr]++ == 0) {
			pm->pm_sr[sr] &= ~SR_NOEXEC;
		}
	}
#endif
}

/*
 * Mark a mapping as non-executable.
 * If this was the last executable mapping in the segment,
 * set the noexec flag.
 */
static void
pvo_clear_exec(struct pvo_entry *pvo)
{
	struct pmap *pm = pvo->pvo_pmap;

	if (pm == pmap_kernel() || !PVO_EXECUTABLE_P(pvo)) {
		return;
	}
	pvo->pvo_vaddr &= ~PVO_EXECUTABLE;
#if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
	{
		int sr = PVO_VADDR(pvo) >> ADDR_SR_SHFT;
		if (--pm->pm_exec[sr] == 0) {
			pm->pm_sr[sr] |= SR_NOEXEC;
		}
	}
#endif
}
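
/*
 * pm_exec[] thus counts executable mappings per 256MB segment: the
 * segment register's SR_NOEXEC bit is cleared when the count goes from
 * zero to one and set again when the last executable mapping in the
 * segment goes away.  Kernel mappings are exempt from this bookkeeping.
 */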
1879 */ 1880 static void 1881 pvo_clear_exec(struct pvo_entry *pvo) 1882 { 1883 struct pmap *pm = pvo->pvo_pmap; 1884 1885 if (pm == pmap_kernel() || !PVO_EXECUTABLE_P(pvo)) { 1886 return; 1887 } 1888 pvo->pvo_vaddr &= ~PVO_EXECUTABLE; 1889 #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE) 1890 { 1891 int sr = PVO_VADDR(pvo) >> ADDR_SR_SHFT; 1892 if (--pm->pm_exec[sr] == 0) { 1893 pm->pm_sr[sr] |= SR_NOEXEC; 1894 } 1895 } 1896 #endif 1897 } 1898 1899 /* 1900 * Insert physical page at pa into the given pmap at virtual address va. 1901 */ 1902 int 1903 pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) 1904 { 1905 struct mem_region *mp; 1906 struct pvo_head *pvo_head; 1907 struct vm_page *pg; 1908 struct pool *pl; 1909 register_t pte_lo; 1910 int error; 1911 u_int was_exec = 0; 1912 1913 PMAP_LOCK(); 1914 1915 if (__predict_false(!pmap_initialized)) { 1916 pvo_head = &pmap_pvo_kunmanaged; 1917 pl = &pmap_upvo_pool; 1918 pg = NULL; 1919 was_exec = PTE_EXEC; 1920 } else { 1921 pvo_head = pa_to_pvoh(pa, &pg); 1922 pl = &pmap_mpvo_pool; 1923 } 1924 1925 DPRINTFN(ENTER, 1926 "pmap_enter(%p, %#" _PRIxva ", %#" _PRIxpa ", 0x%x, 0x%x):", 1927 pm, va, pa, prot, flags); 1928 1929 /* 1930 * If this is a managed page, and it's the first reference to the 1931 * page clear the execness of the page. Otherwise fetch the execness. 1932 */ 1933 if (pg != NULL) 1934 was_exec = pmap_attr_fetch(pg) & PTE_EXEC; 1935 1936 DPRINTFN(ENTER, " was_exec=%d", was_exec); 1937 1938 /* 1939 * Assume the page is cache inhibited and access is guarded unless 1940 * it's in our available memory array. If it is in the memory array, 1941 * asssume it's in memory coherent memory. 1942 */ 1943 if (flags & PMAP_MD_PREFETCHABLE) { 1944 pte_lo = 0; 1945 } else 1946 pte_lo = PTE_G; 1947 1948 if ((flags & PMAP_NOCACHE) == 0) { 1949 for (mp = mem; mp->size; mp++) { 1950 if (pa >= mp->start && pa < mp->start + mp->size) { 1951 pte_lo = PTE_M; 1952 break; 1953 } 1954 } 1955 #ifdef MULTIPROCESSOR 1956 if (((mfpvr() >> 16) & 0xffff) == MPC603e) 1957 pte_lo = PTE_M; 1958 #endif 1959 } else { 1960 pte_lo |= PTE_I; 1961 } 1962 1963 if (prot & VM_PROT_WRITE) 1964 pte_lo |= PTE_BW; 1965 else 1966 pte_lo |= PTE_BR; 1967 1968 /* 1969 * If this was in response to a fault, "pre-fault" the PTE's 1970 * changed/referenced bit appropriately. 1971 */ 1972 if (flags & VM_PROT_WRITE) 1973 pte_lo |= PTE_CHG; 1974 if (flags & VM_PROT_ALL) 1975 pte_lo |= PTE_REF; 1976 1977 /* 1978 * We need to know if this page can be executable 1979 */ 1980 flags |= (prot & VM_PROT_EXECUTE); 1981 1982 /* 1983 * Record mapping for later back-translation and pte spilling. 1984 * This will overwrite any existing mapping. 1985 */ 1986 error = pmap_pvo_enter(pm, pl, pvo_head, va, pa, pte_lo, flags); 1987 1988 /* 1989 * Flush the real page from the instruction cache if this page is 1990 * mapped executable and cacheable and has not been flushed since 1991 * the last time it was modified. 
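 * A successful flush is remembered by setting PTE_EXEC in the
 * page attributes, so later executable mappings of the same page
 * can skip the syncicache.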
1992 */ 1993 if (error == 0 && 1994 (flags & VM_PROT_EXECUTE) && 1995 (pte_lo & PTE_I) == 0 && 1996 was_exec == 0) { 1997 DPRINTFN(ENTER, " %s", "syncicache"); 1998 PMAPCOUNT(exec_synced); 1999 pmap_syncicache(pa, PAGE_SIZE); 2000 if (pg != NULL) { 2001 pmap_attr_save(pg, PTE_EXEC); 2002 PMAPCOUNT(exec_cached); 2003 #if defined(DEBUG) || defined(PMAPDEBUG) 2004 if (pmapdebug & PMAPDEBUG_ENTER) 2005 printf(" marked-as-exec"); 2006 else if (pmapdebug & PMAPDEBUG_EXEC) 2007 printf("[pmap_enter: %#" _PRIxpa ": marked-as-exec]\n", 2008 VM_PAGE_TO_PHYS(pg)); 2009 2010 #endif 2011 } 2012 } 2013 2014 DPRINTFN(ENTER, ": error=%d\n", error); 2015 2016 PMAP_UNLOCK(); 2017 2018 return error; 2019 } 2020 2021 void 2022 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) 2023 { 2024 struct mem_region *mp; 2025 register_t pte_lo; 2026 int error; 2027 2028 #if defined (PMAP_OEA64_BRIDGE) || defined (PMAP_OEA) 2029 if (va < VM_MIN_KERNEL_ADDRESS) 2030 panic("pmap_kenter_pa: attempt to enter " 2031 "non-kernel address %#" _PRIxva "!", va); 2032 #endif 2033 2034 DPRINTFN(KENTER, 2035 "pmap_kenter_pa(%#" _PRIxva ",%#" _PRIxpa ",%#x)\n", va, pa, prot); 2036 2037 PMAP_LOCK(); 2038 2039 /* 2040 * Assume the page is cache inhibited and access is guarded unless 2041 * it's in our available memory array. If it is in the memory array, 2042 * asssume it's in memory coherent memory. 2043 */ 2044 pte_lo = PTE_IG; 2045 if ((flags & PMAP_NOCACHE) == 0) { 2046 for (mp = mem; mp->size; mp++) { 2047 if (pa >= mp->start && pa < mp->start + mp->size) { 2048 pte_lo = PTE_M; 2049 break; 2050 } 2051 } 2052 #ifdef MULTIPROCESSOR 2053 if (((mfpvr() >> 16) & 0xffff) == MPC603e) 2054 pte_lo = PTE_M; 2055 #endif 2056 } 2057 2058 if (prot & VM_PROT_WRITE) 2059 pte_lo |= PTE_BW; 2060 else 2061 pte_lo |= PTE_BR; 2062 2063 /* 2064 * We don't care about REF/CHG on PVOs on the unmanaged list. 2065 */ 2066 error = pmap_pvo_enter(pmap_kernel(), &pmap_upvo_pool, 2067 &pmap_pvo_kunmanaged, va, pa, pte_lo, prot|PMAP_WIRED); 2068 2069 if (error != 0) 2070 panic("pmap_kenter_pa: failed to enter va %#" _PRIxva " pa %#" _PRIxpa ": %d", 2071 va, pa, error); 2072 2073 PMAP_UNLOCK(); 2074 } 2075 2076 void 2077 pmap_kremove(vaddr_t va, vsize_t len) 2078 { 2079 if (va < VM_MIN_KERNEL_ADDRESS) 2080 panic("pmap_kremove: attempt to remove " 2081 "non-kernel address %#" _PRIxva "!", va); 2082 2083 DPRINTFN(KREMOVE, "pmap_kremove(%#" _PRIxva ",%#" _PRIxva ")\n", va, len); 2084 pmap_remove(pmap_kernel(), va, va + len); 2085 } 2086 2087 /* 2088 * Remove the given range of mapping entries. 2089 */ 2090 void 2091 pmap_remove(pmap_t pm, vaddr_t va, vaddr_t endva) 2092 { 2093 struct pvo_head pvol; 2094 struct pvo_entry *pvo; 2095 register_t msr; 2096 int pteidx; 2097 2098 PMAP_LOCK(); 2099 LIST_INIT(&pvol); 2100 msr = pmap_interrupts_off(); 2101 for (; va < endva; va += PAGE_SIZE) { 2102 pvo = pmap_pvo_find_va(pm, va, &pteidx); 2103 if (pvo != NULL) { 2104 pmap_pvo_remove(pvo, pteidx, &pvol); 2105 } 2106 } 2107 pmap_interrupts_restore(msr); 2108 pmap_pvo_free_list(&pvol); 2109 PMAP_UNLOCK(); 2110 } 2111 2112 /* 2113 * Get the physical page address for the given pmap/virtual address. 2114 */ 2115 bool 2116 pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap) 2117 { 2118 struct pvo_entry *pvo; 2119 register_t msr; 2120 2121 PMAP_LOCK(); 2122 2123 /* 2124 * If this is a kernel pmap lookup, also check the battable 2125 * and if we get a hit, translate the VA to a PA using the 2126 * BAT entries. 
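 * (With a 256MB BAT, for instance, the computed mask keeps the
 * top four physical address bits from the BAT and takes the low
 * 28 bits straight from va.)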
Don't check for VM_MAX_KERNEL_ADDRESS is 2127 * that will wrap back to 0. 2128 */ 2129 if (pm == pmap_kernel() && 2130 (va < VM_MIN_KERNEL_ADDRESS || 2131 (KERNEL2_SR < 15 && VM_MAX_KERNEL_ADDRESS <= va))) { 2132 KASSERT((va >> ADDR_SR_SHFT) != USER_SR); 2133 #if defined (PMAP_OEA) 2134 #ifdef PPC_OEA601 2135 if ((MFPVR() >> 16) == MPC601) { 2136 register_t batu = battable[va >> 23].batu; 2137 register_t batl = battable[va >> 23].batl; 2138 register_t sr = iosrtable[va >> ADDR_SR_SHFT]; 2139 if (BAT601_VALID_P(batl) && 2140 BAT601_VA_MATCH_P(batu, batl, va)) { 2141 register_t mask = 2142 (~(batl & BAT601_BSM) << 17) & ~0x1ffffL; 2143 if (pap) 2144 *pap = (batl & mask) | (va & ~mask); 2145 PMAP_UNLOCK(); 2146 return true; 2147 } else if (SR601_VALID_P(sr) && 2148 SR601_PA_MATCH_P(sr, va)) { 2149 if (pap) 2150 *pap = va; 2151 PMAP_UNLOCK(); 2152 return true; 2153 } 2154 } else 2155 #endif /* PPC_OEA601 */ 2156 { 2157 register_t batu = battable[BAT_VA2IDX(va)].batu; 2158 if (BAT_VALID_P(batu,0) && BAT_VA_MATCH_P(batu,va)) { 2159 register_t batl = battable[BAT_VA2IDX(va)].batl; 2160 register_t mask = 2161 (~(batu & (BAT_XBL|BAT_BL)) << 15) & ~0x1ffffL; 2162 if (pap) 2163 *pap = (batl & mask) | (va & ~mask); 2164 PMAP_UNLOCK(); 2165 return true; 2166 } 2167 } 2168 PMAP_UNLOCK(); 2169 return false; 2170 #elif defined (PMAP_OEA64_BRIDGE) 2171 if (va >= SEGMENT_LENGTH) 2172 panic("%s: pm: %s va >= SEGMENT_LENGTH, va: 0x%08lx\n", 2173 __func__, (pm == pmap_kernel() ? "kernel" : "user"), va); 2174 else { 2175 if (pap) 2176 *pap = va; 2177 PMAP_UNLOCK(); 2178 return true; 2179 } 2180 #elif defined (PMAP_OEA64) 2181 #error PPC_OEA64 not supported 2182 #endif /* PPC_OEA */ 2183 } 2184 2185 msr = pmap_interrupts_off(); 2186 pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL); 2187 if (pvo != NULL) { 2188 PMAP_PVO_CHECK(pvo); /* sanity check */ 2189 if (pap) 2190 *pap = (pvo->pvo_pte.pte_lo & PTE_RPGN) 2191 | (va & ADDR_POFF); 2192 } 2193 pmap_interrupts_restore(msr); 2194 PMAP_UNLOCK(); 2195 return pvo != NULL; 2196 } 2197 2198 /* 2199 * Lower the protection on the specified range of this pmap. 2200 */ 2201 void 2202 pmap_protect(pmap_t pm, vaddr_t va, vaddr_t endva, vm_prot_t prot) 2203 { 2204 struct pvo_entry *pvo; 2205 volatile struct pte *pt; 2206 register_t msr; 2207 int pteidx; 2208 2209 /* 2210 * Since this routine only downgrades protection, we should 2211 * always be called with at least one bit not set. 2212 */ 2213 KASSERT(prot != VM_PROT_ALL); 2214 2215 /* 2216 * If there is no protection, this is equivalent to 2217 * remove the pmap from the pmap. 2218 */ 2219 if ((prot & VM_PROT_READ) == 0) { 2220 pmap_remove(pm, va, endva); 2221 return; 2222 } 2223 2224 PMAP_LOCK(); 2225 2226 msr = pmap_interrupts_off(); 2227 for (; va < endva; va += PAGE_SIZE) { 2228 pvo = pmap_pvo_find_va(pm, va, &pteidx); 2229 if (pvo == NULL) 2230 continue; 2231 PMAP_PVO_CHECK(pvo); /* sanity check */ 2232 2233 /* 2234 * Revoke executable if asked to do so. 2235 */ 2236 if ((prot & VM_PROT_EXECUTE) == 0) 2237 pvo_clear_exec(pvo); 2238 2239 #if 0 2240 /* 2241 * If the page is already read-only, no change 2242 * needs to be made. 2243 */ 2244 if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) 2245 continue; 2246 #endif 2247 /* 2248 * Grab the PTE pointer before we diddle with 2249 * the cached PTE copy. 2250 */ 2251 pt = pmap_pvo_to_pte(pvo, pteidx); 2252 /* 2253 * Change the protection of the page. 
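 * The PP bits are forced to PTE_BR (read-only); removing the
 * mapping entirely was already handled above when VM_PROT_READ
 * was absent.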
2254 */ 2255 pvo->pvo_pte.pte_lo &= ~PTE_PP; 2256 pvo->pvo_pte.pte_lo |= PTE_BR; 2257 2258 /* 2259 * If the PVO is in the page table, update 2260 * that pte at well. 2261 */ 2262 if (pt != NULL) { 2263 pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 2264 PVO_WHERE(pvo, PMAP_PROTECT); 2265 PMAPCOUNT(ptes_changed); 2266 } 2267 2268 PMAP_PVO_CHECK(pvo); /* sanity check */ 2269 } 2270 pmap_interrupts_restore(msr); 2271 PMAP_UNLOCK(); 2272 } 2273 2274 void 2275 pmap_unwire(pmap_t pm, vaddr_t va) 2276 { 2277 struct pvo_entry *pvo; 2278 register_t msr; 2279 2280 PMAP_LOCK(); 2281 msr = pmap_interrupts_off(); 2282 pvo = pmap_pvo_find_va(pm, va, NULL); 2283 if (pvo != NULL) { 2284 if (PVO_WIRED_P(pvo)) { 2285 pvo->pvo_vaddr &= ~PVO_WIRED; 2286 pm->pm_stats.wired_count--; 2287 } 2288 PMAP_PVO_CHECK(pvo); /* sanity check */ 2289 } 2290 pmap_interrupts_restore(msr); 2291 PMAP_UNLOCK(); 2292 } 2293 2294 /* 2295 * Lower the protection on the specified physical page. 2296 */ 2297 void 2298 pmap_page_protect(struct vm_page *pg, vm_prot_t prot) 2299 { 2300 struct pvo_head *pvo_head, pvol; 2301 struct pvo_entry *pvo, *next_pvo; 2302 volatile struct pte *pt; 2303 register_t msr; 2304 2305 PMAP_LOCK(); 2306 2307 KASSERT(prot != VM_PROT_ALL); 2308 LIST_INIT(&pvol); 2309 msr = pmap_interrupts_off(); 2310 2311 /* 2312 * When UVM reuses a page, it does a pmap_page_protect with 2313 * VM_PROT_NONE. At that point, we can clear the exec flag 2314 * since we know the page will have different contents. 2315 */ 2316 if ((prot & VM_PROT_READ) == 0) { 2317 DPRINTFN(EXEC, "[pmap_page_protect: %#" _PRIxpa ": clear-exec]\n", 2318 VM_PAGE_TO_PHYS(pg)); 2319 if (pmap_attr_fetch(pg) & PTE_EXEC) { 2320 PMAPCOUNT(exec_uncached_page_protect); 2321 pmap_attr_clear(pg, PTE_EXEC); 2322 } 2323 } 2324 2325 pvo_head = vm_page_to_pvoh(pg); 2326 for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) { 2327 next_pvo = LIST_NEXT(pvo, pvo_vlink); 2328 PMAP_PVO_CHECK(pvo); /* sanity check */ 2329 2330 /* 2331 * Downgrading to no mapping at all, we just remove the entry. 2332 */ 2333 if ((prot & VM_PROT_READ) == 0) { 2334 pmap_pvo_remove(pvo, -1, &pvol); 2335 continue; 2336 } 2337 2338 /* 2339 * If EXEC permission is being revoked, just clear the 2340 * flag in the PVO. 2341 */ 2342 if ((prot & VM_PROT_EXECUTE) == 0) 2343 pvo_clear_exec(pvo); 2344 2345 /* 2346 * If this entry is already RO, don't diddle with the 2347 * page table. 2348 */ 2349 if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) { 2350 PMAP_PVO_CHECK(pvo); 2351 continue; 2352 } 2353 2354 /* 2355 * Grab the PTE before the we diddle the bits so 2356 * pvo_to_pte can verify the pte contents are as 2357 * expected. 2358 */ 2359 pt = pmap_pvo_to_pte(pvo, -1); 2360 pvo->pvo_pte.pte_lo &= ~PTE_PP; 2361 pvo->pvo_pte.pte_lo |= PTE_BR; 2362 if (pt != NULL) { 2363 pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 2364 PVO_WHERE(pvo, PMAP_PAGE_PROTECT); 2365 PMAPCOUNT(ptes_changed); 2366 } 2367 PMAP_PVO_CHECK(pvo); /* sanity check */ 2368 } 2369 pmap_interrupts_restore(msr); 2370 pmap_pvo_free_list(&pvol); 2371 2372 PMAP_UNLOCK(); 2373 } 2374 2375 /* 2376 * Activate the address space for the specified process. If the process 2377 * is the current process, load the new MMU context. 2378 */ 2379 void 2380 pmap_activate(struct lwp *l) 2381 { 2382 struct pcb *pcb = lwp_getpcb(l); 2383 pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap; 2384 2385 DPRINTFN(ACTIVATE, 2386 "pmap_activate: lwp %p (curlwp %p)\n", l, curlwp); 2387 2388 /* 2389 * XXX Normally performed in cpu_lwp_fork(). 
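 * Record the pmap in the PCB here as cpu_lwp_fork() would
 * normally have done.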
2390 */ 2391 pcb->pcb_pm = pmap; 2392 2393 /* 2394 * In theory, the SR registers need only be valid on return 2395 * to user space wait to do them there. 2396 */ 2397 if (l == curlwp) { 2398 /* Store pointer to new current pmap. */ 2399 curpm = pmap; 2400 } 2401 } 2402 2403 /* 2404 * Deactivate the specified process's address space. 2405 */ 2406 void 2407 pmap_deactivate(struct lwp *l) 2408 { 2409 } 2410 2411 bool 2412 pmap_query_bit(struct vm_page *pg, int ptebit) 2413 { 2414 struct pvo_entry *pvo; 2415 volatile struct pte *pt; 2416 register_t msr; 2417 2418 PMAP_LOCK(); 2419 2420 if (pmap_attr_fetch(pg) & ptebit) { 2421 PMAP_UNLOCK(); 2422 return true; 2423 } 2424 2425 msr = pmap_interrupts_off(); 2426 LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) { 2427 PMAP_PVO_CHECK(pvo); /* sanity check */ 2428 /* 2429 * See if we saved the bit off. If so cache, it and return 2430 * success. 2431 */ 2432 if (pvo->pvo_pte.pte_lo & ptebit) { 2433 pmap_attr_save(pg, ptebit); 2434 PMAP_PVO_CHECK(pvo); /* sanity check */ 2435 pmap_interrupts_restore(msr); 2436 PMAP_UNLOCK(); 2437 return true; 2438 } 2439 } 2440 /* 2441 * No luck, now go thru the hard part of looking at the ptes 2442 * themselves. Sync so any pending REF/CHG bits are flushed 2443 * to the PTEs. 2444 */ 2445 SYNC(); 2446 LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) { 2447 PMAP_PVO_CHECK(pvo); /* sanity check */ 2448 /* 2449 * See if this pvo have a valid PTE. If so, fetch the 2450 * REF/CHG bits from the valid PTE. If the appropriate 2451 * ptebit is set, cache, it and return success. 2452 */ 2453 pt = pmap_pvo_to_pte(pvo, -1); 2454 if (pt != NULL) { 2455 pmap_pte_synch(pt, &pvo->pvo_pte); 2456 if (pvo->pvo_pte.pte_lo & ptebit) { 2457 pmap_attr_save(pg, ptebit); 2458 PMAP_PVO_CHECK(pvo); /* sanity check */ 2459 pmap_interrupts_restore(msr); 2460 PMAP_UNLOCK(); 2461 return true; 2462 } 2463 } 2464 } 2465 pmap_interrupts_restore(msr); 2466 PMAP_UNLOCK(); 2467 return false; 2468 } 2469 2470 bool 2471 pmap_clear_bit(struct vm_page *pg, int ptebit) 2472 { 2473 struct pvo_head *pvoh = vm_page_to_pvoh(pg); 2474 struct pvo_entry *pvo; 2475 volatile struct pte *pt; 2476 register_t msr; 2477 int rv = 0; 2478 2479 PMAP_LOCK(); 2480 msr = pmap_interrupts_off(); 2481 2482 /* 2483 * Fetch the cache value 2484 */ 2485 rv |= pmap_attr_fetch(pg); 2486 2487 /* 2488 * Clear the cached value. 2489 */ 2490 pmap_attr_clear(pg, ptebit); 2491 2492 /* 2493 * Sync so any pending REF/CHG bits are flushed to the PTEs (so we 2494 * can reset the right ones). Note that since the pvo entries and 2495 * list heads are accessed via BAT0 and are never placed in the 2496 * page table, we don't have to worry about further accesses setting 2497 * the REF/CHG bits. 2498 */ 2499 SYNC(); 2500 2501 /* 2502 * For each pvo entry, clear pvo's ptebit. If this pvo have a 2503 * valid PTE. If so, clear the ptebit from the valid PTE. 2504 */ 2505 LIST_FOREACH(pvo, pvoh, pvo_vlink) { 2506 PMAP_PVO_CHECK(pvo); /* sanity check */ 2507 pt = pmap_pvo_to_pte(pvo, -1); 2508 if (pt != NULL) { 2509 /* 2510 * Only sync the PTE if the bit we are looking 2511 * for is not already set. 2512 */ 2513 if ((pvo->pvo_pte.pte_lo & ptebit) == 0) 2514 pmap_pte_synch(pt, &pvo->pvo_pte); 2515 /* 2516 * If the bit we are looking for was already set, 2517 * clear that bit in the pte. 
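 * (Otherwise a later pmap_pte_synch() would simply copy the
 * stale bit back into the cached pte.)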
2518 */ 2519 if (pvo->pvo_pte.pte_lo & ptebit) 2520 pmap_pte_clear(pt, PVO_VADDR(pvo), ptebit); 2521 } 2522 rv |= pvo->pvo_pte.pte_lo & (PTE_CHG|PTE_REF); 2523 pvo->pvo_pte.pte_lo &= ~ptebit; 2524 PMAP_PVO_CHECK(pvo); /* sanity check */ 2525 } 2526 pmap_interrupts_restore(msr); 2527 2528 /* 2529 * If we are clearing the modify bit and this page was marked EXEC 2530 * and the user of the page thinks the page was modified, then we 2531 * need to clean it from the icache if it's mapped or clear the EXEC 2532 * bit if it's not mapped. The page itself might not have the CHG 2533 * bit set if the modification was done via DMA to the page. 2534 */ 2535 if ((ptebit & PTE_CHG) && (rv & PTE_EXEC)) { 2536 if (LIST_EMPTY(pvoh)) { 2537 DPRINTFN(EXEC, "[pmap_clear_bit: %#" _PRIxpa ": clear-exec]\n", 2538 VM_PAGE_TO_PHYS(pg)); 2539 pmap_attr_clear(pg, PTE_EXEC); 2540 PMAPCOUNT(exec_uncached_clear_modify); 2541 } else { 2542 DPRINTFN(EXEC, "[pmap_clear_bit: %#" _PRIxpa ": syncicache]\n", 2543 VM_PAGE_TO_PHYS(pg)); 2544 pmap_syncicache(VM_PAGE_TO_PHYS(pg), PAGE_SIZE); 2545 PMAPCOUNT(exec_synced_clear_modify); 2546 } 2547 } 2548 PMAP_UNLOCK(); 2549 return (rv & ptebit) != 0; 2550 } 2551 2552 void 2553 pmap_procwr(struct proc *p, vaddr_t va, size_t len) 2554 { 2555 struct pvo_entry *pvo; 2556 size_t offset = va & ADDR_POFF; 2557 int s; 2558 2559 PMAP_LOCK(); 2560 s = splvm(); 2561 while (len > 0) { 2562 size_t seglen = PAGE_SIZE - offset; 2563 if (seglen > len) 2564 seglen = len; 2565 pvo = pmap_pvo_find_va(p->p_vmspace->vm_map.pmap, va, NULL); 2566 if (pvo != NULL && PVO_EXECUTABLE_P(pvo)) { 2567 pmap_syncicache( 2568 (pvo->pvo_pte.pte_lo & PTE_RPGN) | offset, seglen); 2569 PMAP_PVO_CHECK(pvo); 2570 } 2571 va += seglen; 2572 len -= seglen; 2573 offset = 0; 2574 } 2575 splx(s); 2576 PMAP_UNLOCK(); 2577 } 2578 2579 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB) 2580 void 2581 pmap_pte_print(volatile struct pte *pt) 2582 { 2583 printf("PTE %p: ", pt); 2584 2585 #if defined(PMAP_OEA) 2586 /* High word: */ 2587 printf("%#" _PRIxpte ": [", pt->pte_hi); 2588 #else 2589 printf("%#" _PRIxpte ": [", pt->pte_hi); 2590 #endif /* PMAP_OEA */ 2591 2592 printf("%c ", (pt->pte_hi & PTE_VALID) ? 'v' : 'i'); 2593 printf("%c ", (pt->pte_hi & PTE_HID) ? 'h' : '-'); 2594 2595 printf("%#" _PRIxpte " %#" _PRIxpte "", 2596 (pt->pte_hi &~ PTE_VALID)>>PTE_VSID_SHFT, 2597 pt->pte_hi & PTE_API); 2598 #if defined(PMAP_OEA) || defined(PMAP_OEA64_BRIDGE) 2599 printf(" (va %#" _PRIxva ")] ", pmap_pte_to_va(pt)); 2600 #else 2601 printf(" (va %#" _PRIxva ")] ", pmap_pte_to_va(pt)); 2602 #endif /* PMAP_OEA */ 2603 2604 /* Low word: */ 2605 #if defined (PMAP_OEA) 2606 printf(" %#" _PRIxpte ": [", pt->pte_lo); 2607 printf("%#" _PRIxpte "... ", pt->pte_lo >> 12); 2608 #else 2609 printf(" %#" _PRIxpte ": [", pt->pte_lo); 2610 printf("%#" _PRIxpte "... ", pt->pte_lo >> 12); 2611 #endif 2612 printf("%c ", (pt->pte_lo & PTE_REF) ? 'r' : 'u'); 2613 printf("%c ", (pt->pte_lo & PTE_CHG) ? 'c' : 'n'); 2614 printf("%c", (pt->pte_lo & PTE_W) ? 'w' : '.'); 2615 printf("%c", (pt->pte_lo & PTE_I) ? 'i' : '.'); 2616 printf("%c", (pt->pte_lo & PTE_M) ? 'm' : '.'); 2617 printf("%c ", (pt->pte_lo & PTE_G) ? 
'g' : '.'); 2618 switch (pt->pte_lo & PTE_PP) { 2619 case PTE_BR: printf("br]\n"); break; 2620 case PTE_BW: printf("bw]\n"); break; 2621 case PTE_SO: printf("so]\n"); break; 2622 case PTE_SW: printf("sw]\n"); break; 2623 } 2624 } 2625 #endif 2626 2627 #if defined(DDB) 2628 void 2629 pmap_pteg_check(void) 2630 { 2631 volatile struct pte *pt; 2632 int i; 2633 int ptegidx; 2634 u_int p_valid = 0; 2635 u_int s_valid = 0; 2636 u_int invalid = 0; 2637 2638 for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) { 2639 for (pt = pmap_pteg_table[ptegidx].pt, i = 8; --i >= 0; pt++) { 2640 if (pt->pte_hi & PTE_VALID) { 2641 if (pt->pte_hi & PTE_HID) 2642 s_valid++; 2643 else 2644 { 2645 p_valid++; 2646 } 2647 } else 2648 invalid++; 2649 } 2650 } 2651 printf("pteg_check: v(p) %#x (%d), v(s) %#x (%d), i %#x (%d)\n", 2652 p_valid, p_valid, s_valid, s_valid, 2653 invalid, invalid); 2654 } 2655 2656 void 2657 pmap_print_mmuregs(void) 2658 { 2659 int i; 2660 #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE) 2661 u_int cpuvers; 2662 #endif 2663 #ifndef PMAP_OEA64 2664 vaddr_t addr; 2665 register_t soft_sr[16]; 2666 #endif 2667 #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE) 2668 struct bat soft_ibat[4]; 2669 struct bat soft_dbat[4]; 2670 #endif 2671 paddr_t sdr1; 2672 2673 #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE) 2674 cpuvers = MFPVR() >> 16; 2675 #endif 2676 __asm volatile ("mfsdr1 %0" : "=r"(sdr1)); 2677 #ifndef PMAP_OEA64 2678 addr = 0; 2679 for (i = 0; i < 16; i++) { 2680 soft_sr[i] = MFSRIN(addr); 2681 addr += (1 << ADDR_SR_SHFT); 2682 } 2683 #endif 2684 2685 #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE) 2686 /* read iBAT (601: uBAT) registers */ 2687 __asm volatile ("mfibatu %0,0" : "=r"(soft_ibat[0].batu)); 2688 __asm volatile ("mfibatl %0,0" : "=r"(soft_ibat[0].batl)); 2689 __asm volatile ("mfibatu %0,1" : "=r"(soft_ibat[1].batu)); 2690 __asm volatile ("mfibatl %0,1" : "=r"(soft_ibat[1].batl)); 2691 __asm volatile ("mfibatu %0,2" : "=r"(soft_ibat[2].batu)); 2692 __asm volatile ("mfibatl %0,2" : "=r"(soft_ibat[2].batl)); 2693 __asm volatile ("mfibatu %0,3" : "=r"(soft_ibat[3].batu)); 2694 __asm volatile ("mfibatl %0,3" : "=r"(soft_ibat[3].batl)); 2695 2696 2697 if (cpuvers != MPC601) { 2698 /* read dBAT registers */ 2699 __asm volatile ("mfdbatu %0,0" : "=r"(soft_dbat[0].batu)); 2700 __asm volatile ("mfdbatl %0,0" : "=r"(soft_dbat[0].batl)); 2701 __asm volatile ("mfdbatu %0,1" : "=r"(soft_dbat[1].batu)); 2702 __asm volatile ("mfdbatl %0,1" : "=r"(soft_dbat[1].batl)); 2703 __asm volatile ("mfdbatu %0,2" : "=r"(soft_dbat[2].batu)); 2704 __asm volatile ("mfdbatl %0,2" : "=r"(soft_dbat[2].batl)); 2705 __asm volatile ("mfdbatu %0,3" : "=r"(soft_dbat[3].batu)); 2706 __asm volatile ("mfdbatl %0,3" : "=r"(soft_dbat[3].batl)); 2707 } 2708 #endif 2709 2710 printf("SDR1:\t%#" _PRIxpa "\n", sdr1); 2711 #ifndef PMAP_OEA64 2712 printf("SR[]:\t"); 2713 for (i = 0; i < 4; i++) 2714 printf("0x%08lx, ", soft_sr[i]); 2715 printf("\n\t"); 2716 for ( ; i < 8; i++) 2717 printf("0x%08lx, ", soft_sr[i]); 2718 printf("\n\t"); 2719 for ( ; i < 12; i++) 2720 printf("0x%08lx, ", soft_sr[i]); 2721 printf("\n\t"); 2722 for ( ; i < 16; i++) 2723 printf("0x%08lx, ", soft_sr[i]); 2724 printf("\n"); 2725 #endif 2726 2727 #if defined(PMAP_OEA) || defined(PMAP_OEA64_BRIDGE) 2728 printf("%cBAT[]:\t", cpuvers == MPC601 ? 
'u' : 'i'); 2729 for (i = 0; i < 4; i++) { 2730 printf("0x%08lx 0x%08lx, ", 2731 soft_ibat[i].batu, soft_ibat[i].batl); 2732 if (i == 1) 2733 printf("\n\t"); 2734 } 2735 if (cpuvers != MPC601) { 2736 printf("\ndBAT[]:\t"); 2737 for (i = 0; i < 4; i++) { 2738 printf("0x%08lx 0x%08lx, ", 2739 soft_dbat[i].batu, soft_dbat[i].batl); 2740 if (i == 1) 2741 printf("\n\t"); 2742 } 2743 } 2744 printf("\n"); 2745 #endif /* PMAP_OEA... */ 2746 } 2747 2748 void 2749 pmap_print_pte(pmap_t pm, vaddr_t va) 2750 { 2751 struct pvo_entry *pvo; 2752 volatile struct pte *pt; 2753 int pteidx; 2754 2755 pvo = pmap_pvo_find_va(pm, va, &pteidx); 2756 if (pvo != NULL) { 2757 pt = pmap_pvo_to_pte(pvo, pteidx); 2758 if (pt != NULL) { 2759 printf("VA %#" _PRIxva " -> %p -> %s %#" _PRIxpte ", %#" _PRIxpte "\n", 2760 va, pt, 2761 pt->pte_hi & PTE_HID ? "(sec)" : "(pri)", 2762 pt->pte_hi, pt->pte_lo); 2763 } else { 2764 printf("No valid PTE found\n"); 2765 } 2766 } else { 2767 printf("Address not in pmap\n"); 2768 } 2769 } 2770 2771 void 2772 pmap_pteg_dist(void) 2773 { 2774 struct pvo_entry *pvo; 2775 int ptegidx; 2776 int depth; 2777 int max_depth = 0; 2778 unsigned int depths[64]; 2779 2780 memset(depths, 0, sizeof(depths)); 2781 for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) { 2782 depth = 0; 2783 TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { 2784 depth++; 2785 } 2786 if (depth > max_depth) 2787 max_depth = depth; 2788 if (depth > 63) 2789 depth = 63; 2790 depths[depth]++; 2791 } 2792 2793 for (depth = 0; depth < 64; depth++) { 2794 printf(" [%2d]: %8u", depth, depths[depth]); 2795 if ((depth & 3) == 3) 2796 printf("\n"); 2797 if (depth == max_depth) 2798 break; 2799 } 2800 if ((depth & 3) != 3) 2801 printf("\n"); 2802 printf("Max depth found was %d\n", max_depth); 2803 } 2804 #endif /* DEBUG */ 2805 2806 #if defined(PMAPCHECK) || defined(DEBUG) 2807 void 2808 pmap_pvo_verify(void) 2809 { 2810 int ptegidx; 2811 int s; 2812 2813 s = splvm(); 2814 for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) { 2815 struct pvo_entry *pvo; 2816 TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { 2817 if ((uintptr_t) pvo >= SEGMENT_LENGTH) 2818 panic("pmap_pvo_verify: invalid pvo %p " 2819 "on list %#x", pvo, ptegidx); 2820 pmap_pvo_check(pvo); 2821 } 2822 } 2823 splx(s); 2824 } 2825 #endif /* PMAPCHECK */ 2826 2827 2828 void * 2829 pmap_pool_ualloc(struct pool *pp, int flags) 2830 { 2831 struct pvo_page *pvop; 2832 2833 if (uvm.page_init_done != true) { 2834 return (void *) uvm_pageboot_alloc(PAGE_SIZE); 2835 } 2836 2837 PMAP_LOCK(); 2838 pvop = SIMPLEQ_FIRST(&pmap_upvop_head); 2839 if (pvop != NULL) { 2840 pmap_upvop_free--; 2841 SIMPLEQ_REMOVE_HEAD(&pmap_upvop_head, pvop_link); 2842 PMAP_UNLOCK(); 2843 return pvop; 2844 } 2845 PMAP_UNLOCK(); 2846 return pmap_pool_malloc(pp, flags); 2847 } 2848 2849 void * 2850 pmap_pool_malloc(struct pool *pp, int flags) 2851 { 2852 struct pvo_page *pvop; 2853 struct vm_page *pg; 2854 2855 PMAP_LOCK(); 2856 pvop = SIMPLEQ_FIRST(&pmap_mpvop_head); 2857 if (pvop != NULL) { 2858 pmap_mpvop_free--; 2859 SIMPLEQ_REMOVE_HEAD(&pmap_mpvop_head, pvop_link); 2860 PMAP_UNLOCK(); 2861 return pvop; 2862 } 2863 PMAP_UNLOCK(); 2864 again: 2865 pg = uvm_pagealloc_strat(NULL, 0, NULL, UVM_PGA_USERESERVE, 2866 UVM_PGA_STRAT_ONLY, VM_FREELIST_FIRST256); 2867 if (__predict_false(pg == NULL)) { 2868 if (flags & PR_WAITOK) { 2869 uvm_wait("plpg"); 2870 goto again; 2871 } else { 2872 return (0); 2873 } 2874 } 2875 KDASSERT(VM_PAGE_TO_PHYS(pg) == (uintptr_t)VM_PAGE_TO_PHYS(pg)); 2876 return 
(void *)(uintptr_t) VM_PAGE_TO_PHYS(pg); 2877 } 2878 2879 void 2880 pmap_pool_ufree(struct pool *pp, void *va) 2881 { 2882 struct pvo_page *pvop; 2883 #if 0 2884 if (PHYS_TO_VM_PAGE((paddr_t) va) != NULL) { 2885 pmap_pool_mfree(va, size, tag); 2886 return; 2887 } 2888 #endif 2889 PMAP_LOCK(); 2890 pvop = va; 2891 SIMPLEQ_INSERT_HEAD(&pmap_upvop_head, pvop, pvop_link); 2892 pmap_upvop_free++; 2893 if (pmap_upvop_free > pmap_upvop_maxfree) 2894 pmap_upvop_maxfree = pmap_upvop_free; 2895 PMAP_UNLOCK(); 2896 } 2897 2898 void 2899 pmap_pool_mfree(struct pool *pp, void *va) 2900 { 2901 struct pvo_page *pvop; 2902 2903 PMAP_LOCK(); 2904 pvop = va; 2905 SIMPLEQ_INSERT_HEAD(&pmap_mpvop_head, pvop, pvop_link); 2906 pmap_mpvop_free++; 2907 if (pmap_mpvop_free > pmap_mpvop_maxfree) 2908 pmap_mpvop_maxfree = pmap_mpvop_free; 2909 PMAP_UNLOCK(); 2910 #if 0 2911 uvm_pagefree(PHYS_TO_VM_PAGE((paddr_t) va)); 2912 #endif 2913 } 2914 2915 /* 2916 * This routine in bootstraping to steal to-be-managed memory (which will 2917 * then be unmanaged). We use it to grab from the first 256MB for our 2918 * pmap needs and above 256MB for other stuff. 2919 */ 2920 vaddr_t 2921 pmap_steal_memory(vsize_t vsize, vaddr_t *vstartp, vaddr_t *vendp) 2922 { 2923 vsize_t size; 2924 vaddr_t va; 2925 paddr_t start, end, pa = 0; 2926 int npgs, freelist; 2927 uvm_physseg_t bank; 2928 2929 if (uvm.page_init_done == true) 2930 panic("pmap_steal_memory: called _after_ bootstrap"); 2931 2932 *vstartp = VM_MIN_KERNEL_ADDRESS; 2933 *vendp = VM_MAX_KERNEL_ADDRESS; 2934 2935 size = round_page(vsize); 2936 npgs = atop(size); 2937 2938 /* 2939 * PA 0 will never be among those given to UVM so we can use it 2940 * to indicate we couldn't steal any memory. 2941 */ 2942 2943 for (bank = uvm_physseg_get_first(); 2944 uvm_physseg_valid_p(bank); 2945 bank = uvm_physseg_get_next(bank)) { 2946 2947 freelist = uvm_physseg_get_free_list(bank); 2948 start = uvm_physseg_get_start(bank); 2949 end = uvm_physseg_get_end(bank); 2950 2951 if (freelist == VM_FREELIST_FIRST256 && 2952 (end - start) >= npgs) { 2953 pa = ptoa(start); 2954 break; 2955 } 2956 } 2957 2958 if (pa == 0) 2959 panic("pmap_steal_memory: no approriate memory to steal!"); 2960 2961 uvm_physseg_unplug(start, npgs); 2962 2963 va = (vaddr_t) pa; 2964 memset((void *) va, 0, size); 2965 pmap_pages_stolen += npgs; 2966 #ifdef DEBUG 2967 if (pmapdebug && npgs > 1) { 2968 u_int cnt = 0; 2969 for (bank = uvm_physseg_get_first(); 2970 uvm_physseg_valid_p(bank); 2971 bank = uvm_physseg_get_next(bank)) { 2972 cnt += uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank); 2973 } 2974 printf("pmap_steal_memory: stole %u (total %u) pages (%u left)\n", 2975 npgs, pmap_pages_stolen, cnt); 2976 } 2977 #endif 2978 2979 return va; 2980 } 2981 2982 /* 2983 * Find a chuck of memory with right size and alignment. 
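 * The block is carved directly out of the avail[] regions so it
 * will never be handed to UVM later; the region that contained
 * it is shrunk or split as needed.  With at_end set, only page
 * alignment is accepted and the block is taken from the end of
 * the highest region that can hold it.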
2984 */ 2985 paddr_t 2986 pmap_boot_find_memory(psize_t size, psize_t alignment, int at_end) 2987 { 2988 struct mem_region *mp; 2989 paddr_t s, e; 2990 int i, j; 2991 2992 size = round_page(size); 2993 2994 DPRINTFN(BOOT, 2995 "pmap_boot_find_memory: size=%#" _PRIxpa ", alignment=%#" _PRIxpa ", at_end=%d", 2996 size, alignment, at_end); 2997 2998 if (alignment < PAGE_SIZE || (alignment & (alignment-1)) != 0) 2999 panic("pmap_boot_find_memory: invalid alignment %#" _PRIxpa, 3000 alignment); 3001 3002 if (at_end) { 3003 if (alignment != PAGE_SIZE) 3004 panic("pmap_boot_find_memory: invalid ending " 3005 "alignment %#" _PRIxpa, alignment); 3006 3007 for (mp = &avail[avail_cnt-1]; mp >= avail; mp--) { 3008 s = mp->start + mp->size - size; 3009 if (s >= mp->start && mp->size >= size) { 3010 DPRINTFN(BOOT, ": %#" _PRIxpa "\n", s); 3011 DPRINTFN(BOOT, 3012 "pmap_boot_find_memory: b-avail[%d] start " 3013 "%#" _PRIxpa " size %#" _PRIxpa "\n", mp - avail, 3014 mp->start, mp->size); 3015 mp->size -= size; 3016 DPRINTFN(BOOT, 3017 "pmap_boot_find_memory: a-avail[%d] start " 3018 "%#" _PRIxpa " size %#" _PRIxpa "\n", mp - avail, 3019 mp->start, mp->size); 3020 return s; 3021 } 3022 } 3023 panic("pmap_boot_find_memory: no available memory"); 3024 } 3025 3026 for (mp = avail, i = 0; i < avail_cnt; i++, mp++) { 3027 s = (mp->start + alignment - 1) & ~(alignment-1); 3028 e = s + size; 3029 3030 /* 3031 * Is the calculated region entirely within the region? 3032 */ 3033 if (s < mp->start || e > mp->start + mp->size) 3034 continue; 3035 3036 DPRINTFN(BOOT, ": %#" _PRIxpa "\n", s); 3037 if (s == mp->start) { 3038 /* 3039 * If the block starts at the beginning of region, 3040 * adjust the size & start. (the region may now be 3041 * zero in length) 3042 */ 3043 DPRINTFN(BOOT, 3044 "pmap_boot_find_memory: b-avail[%d] start " 3045 "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size); 3046 mp->start += size; 3047 mp->size -= size; 3048 DPRINTFN(BOOT, 3049 "pmap_boot_find_memory: a-avail[%d] start " 3050 "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size); 3051 } else if (e == mp->start + mp->size) { 3052 /* 3053 * If the block starts at the beginning of region, 3054 * adjust only the size. 3055 */ 3056 DPRINTFN(BOOT, 3057 "pmap_boot_find_memory: b-avail[%d] start " 3058 "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size); 3059 mp->size -= size; 3060 DPRINTFN(BOOT, 3061 "pmap_boot_find_memory: a-avail[%d] start " 3062 "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size); 3063 } else { 3064 /* 3065 * Block is in the middle of the region, so we 3066 * have to split it in two. 
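 * The following avail[] entries are shifted up by one slot to
 * make room for the new entry describing the tail of the split.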
3067 */ 3068 for (j = avail_cnt; j > i + 1; j--) { 3069 avail[j] = avail[j-1]; 3070 } 3071 DPRINTFN(BOOT, 3072 "pmap_boot_find_memory: b-avail[%d] start " 3073 "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size); 3074 mp[1].start = e; 3075 mp[1].size = mp[0].start + mp[0].size - e; 3076 mp[0].size = s - mp[0].start; 3077 avail_cnt++; 3078 for (; i < avail_cnt; i++) { 3079 DPRINTFN(BOOT, 3080 "pmap_boot_find_memory: a-avail[%d] " 3081 "start %#" _PRIxpa " size %#" _PRIxpa "\n", i, 3082 avail[i].start, avail[i].size); 3083 } 3084 } 3085 KASSERT(s == (uintptr_t) s); 3086 return s; 3087 } 3088 panic("pmap_boot_find_memory: not enough memory for " 3089 "%#" _PRIxpa "/%#" _PRIxpa " allocation?", size, alignment); 3090 } 3091 3092 /* XXXSL: we dont have any BATs to do this, map in Segment 0 1:1 using page tables */ 3093 #if defined (PMAP_OEA64_BRIDGE) 3094 int 3095 pmap_setup_segment0_map(int use_large_pages, ...) 3096 { 3097 vaddr_t va, va_end; 3098 3099 register_t pte_lo = 0x0; 3100 int ptegidx = 0; 3101 struct pte pte; 3102 va_list ap; 3103 3104 /* Coherent + Supervisor RW, no user access */ 3105 pte_lo = PTE_M; 3106 3107 /* XXXSL 3108 * Map in 1st segment 1:1, we'll be careful not to spill kernel entries later, 3109 * these have to take priority. 3110 */ 3111 for (va = 0x0; va < SEGMENT_LENGTH; va += 0x1000) { 3112 ptegidx = va_to_pteg(pmap_kernel(), va); 3113 pmap_pte_create(&pte, pmap_kernel(), va, va | pte_lo); 3114 (void)pmap_pte_insert(ptegidx, &pte); 3115 } 3116 3117 va_start(ap, use_large_pages); 3118 while (1) { 3119 paddr_t pa; 3120 size_t size; 3121 3122 va = va_arg(ap, vaddr_t); 3123 3124 if (va == 0) 3125 break; 3126 3127 pa = va_arg(ap, paddr_t); 3128 size = va_arg(ap, size_t); 3129 3130 for (va_end = va + size; va < va_end; va += 0x1000, pa += 0x1000) { 3131 #if 0 3132 printf("%s: Inserting: va: %#" _PRIxva ", pa: %#" _PRIxpa "\n", __func__, va, pa); 3133 #endif 3134 ptegidx = va_to_pteg(pmap_kernel(), va); 3135 pmap_pte_create(&pte, pmap_kernel(), va, pa | pte_lo); 3136 (void)pmap_pte_insert(ptegidx, &pte); 3137 } 3138 } 3139 va_end(ap); 3140 3141 TLBSYNC(); 3142 SYNC(); 3143 return (0); 3144 } 3145 #endif /* PMAP_OEA64_BRIDGE */ 3146 3147 /* 3148 * This is not part of the defined PMAP interface and is specific to the 3149 * PowerPC architecture. This is called during initppc, before the system 3150 * is really initialized. 3151 */ 3152 void 3153 pmap_bootstrap(paddr_t kernelstart, paddr_t kernelend) 3154 { 3155 struct mem_region *mp, tmp; 3156 paddr_t s, e; 3157 psize_t size; 3158 int i, j; 3159 3160 /* 3161 * Get memory. 3162 */ 3163 mem_regions(&mem, &avail); 3164 #if defined(DEBUG) 3165 if (pmapdebug & PMAPDEBUG_BOOT) { 3166 printf("pmap_bootstrap: memory configuration:\n"); 3167 for (mp = mem; mp->size; mp++) { 3168 printf("pmap_bootstrap: mem start %#" _PRIxpa " size %#" _PRIxpa "\n", 3169 mp->start, mp->size); 3170 } 3171 for (mp = avail; mp->size; mp++) { 3172 printf("pmap_bootstrap: avail start %#" _PRIxpa " size %#" _PRIxpa "\n", 3173 mp->start, mp->size); 3174 } 3175 } 3176 #endif 3177 3178 /* 3179 * Find out how much physical memory we have and in how many chunks. 3180 */ 3181 for (mem_cnt = 0, mp = mem; mp->size; mp++) { 3182 if (mp->start >= pmap_memlimit) 3183 continue; 3184 if (mp->start + mp->size > pmap_memlimit) { 3185 size = pmap_memlimit - mp->start; 3186 physmem += btoc(size); 3187 } else { 3188 physmem += btoc(mp->size); 3189 } 3190 mem_cnt++; 3191 } 3192 3193 /* 3194 * Count the number of available entries. 
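 * (Both the mem[] and avail[] arrays are terminated by an entry
 * with a zero size.)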
3195 */ 3196 for (avail_cnt = 0, mp = avail; mp->size; mp++) 3197 avail_cnt++; 3198 3199 /* 3200 * Page align all regions. 3201 */ 3202 kernelstart = trunc_page(kernelstart); 3203 kernelend = round_page(kernelend); 3204 for (mp = avail, i = 0; i < avail_cnt; i++, mp++) { 3205 s = round_page(mp->start); 3206 mp->size -= (s - mp->start); 3207 mp->size = trunc_page(mp->size); 3208 mp->start = s; 3209 e = mp->start + mp->size; 3210 3211 DPRINTFN(BOOT, 3212 "pmap_bootstrap: b-avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n", 3213 i, mp->start, mp->size); 3214 3215 /* 3216 * Don't allow the end to run beyond our artificial limit 3217 */ 3218 if (e > pmap_memlimit) 3219 e = pmap_memlimit; 3220 3221 /* 3222 * Is this region empty or strange? skip it. 3223 */ 3224 if (e <= s) { 3225 mp->start = 0; 3226 mp->size = 0; 3227 continue; 3228 } 3229 3230 /* 3231 * Does this overlap the beginning of kernel? 3232 * Does extend past the end of the kernel? 3233 */ 3234 else if (s < kernelstart && e > kernelstart) { 3235 if (e > kernelend) { 3236 avail[avail_cnt].start = kernelend; 3237 avail[avail_cnt].size = e - kernelend; 3238 avail_cnt++; 3239 } 3240 mp->size = kernelstart - s; 3241 } 3242 /* 3243 * Check whether this region overlaps the end of the kernel. 3244 */ 3245 else if (s < kernelend && e > kernelend) { 3246 mp->start = kernelend; 3247 mp->size = e - kernelend; 3248 } 3249 /* 3250 * Look whether this regions is completely inside the kernel. 3251 * Nuke it if it does. 3252 */ 3253 else if (s >= kernelstart && e <= kernelend) { 3254 mp->start = 0; 3255 mp->size = 0; 3256 } 3257 /* 3258 * If the user imposed a memory limit, enforce it. 3259 */ 3260 else if (s >= pmap_memlimit) { 3261 mp->start = -PAGE_SIZE; /* let's know why */ 3262 mp->size = 0; 3263 } 3264 else { 3265 mp->start = s; 3266 mp->size = e - s; 3267 } 3268 DPRINTFN(BOOT, 3269 "pmap_bootstrap: a-avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n", 3270 i, mp->start, mp->size); 3271 } 3272 3273 /* 3274 * Move (and uncount) all the null return to the end. 3275 */ 3276 for (mp = avail, i = 0; i < avail_cnt; i++, mp++) { 3277 if (mp->size == 0) { 3278 tmp = avail[i]; 3279 avail[i] = avail[--avail_cnt]; 3280 avail[avail_cnt] = avail[i]; 3281 } 3282 } 3283 3284 /* 3285 * (Bubble)sort them into ascending order. 3286 */ 3287 for (i = 0; i < avail_cnt; i++) { 3288 for (j = i + 1; j < avail_cnt; j++) { 3289 if (avail[i].start > avail[j].start) { 3290 tmp = avail[i]; 3291 avail[i] = avail[j]; 3292 avail[j] = tmp; 3293 } 3294 } 3295 } 3296 3297 /* 3298 * Make sure they don't overlap. 3299 */ 3300 for (mp = avail, i = 0; i < avail_cnt - 1; i++, mp++) { 3301 if (mp[0].start + mp[0].size > mp[1].start) { 3302 mp[0].size = mp[1].start - mp[0].start; 3303 } 3304 DPRINTFN(BOOT, 3305 "pmap_bootstrap: avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n", 3306 i, mp->start, mp->size); 3307 } 3308 DPRINTFN(BOOT, 3309 "pmap_bootstrap: avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n", 3310 i, mp->start, mp->size); 3311 3312 #ifdef PTEGCOUNT 3313 pmap_pteg_cnt = PTEGCOUNT; 3314 #else /* PTEGCOUNT */ 3315 3316 pmap_pteg_cnt = 0x1000; 3317 3318 while (pmap_pteg_cnt < physmem) 3319 pmap_pteg_cnt <<= 1; 3320 3321 pmap_pteg_cnt >>= 1; 3322 #endif /* PTEGCOUNT */ 3323 3324 #ifdef DEBUG 3325 DPRINTFN(BOOT, "pmap_pteg_cnt: 0x%x\n", pmap_pteg_cnt); 3326 #endif 3327 3328 /* 3329 * Find suitably aligned memory for PTEG hash table. 
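 * The hash table has to be naturally aligned (aligned to its own
 * size), which is why the size is also passed as the alignment
 * to pmap_boot_find_memory().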
3330 */ 3331 size = pmap_pteg_cnt * sizeof(struct pteg); 3332 pmap_pteg_table = (void *)(uintptr_t) pmap_boot_find_memory(size, size, 0); 3333 3334 #ifdef DEBUG 3335 DPRINTFN(BOOT, 3336 "PTEG cnt: 0x%x HTAB size: 0x%08x bytes, address: %p\n", pmap_pteg_cnt, (unsigned int)size, pmap_pteg_table); 3337 #endif 3338 3339 3340 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 3341 if ( (uintptr_t) pmap_pteg_table + size > SEGMENT_LENGTH) 3342 panic("pmap_bootstrap: pmap_pteg_table end (%p + %#" _PRIxpa ") > 256MB", 3343 pmap_pteg_table, size); 3344 #endif 3345 3346 memset(__UNVOLATILE(pmap_pteg_table), 0, 3347 pmap_pteg_cnt * sizeof(struct pteg)); 3348 pmap_pteg_mask = pmap_pteg_cnt - 1; 3349 3350 /* 3351 * We cannot do pmap_steal_memory here since UVM hasn't been loaded 3352 * with pages. So we just steal them before giving them to UVM. 3353 */ 3354 size = sizeof(pmap_pvo_table[0]) * pmap_pteg_cnt; 3355 pmap_pvo_table = (void *)(uintptr_t) pmap_boot_find_memory(size, PAGE_SIZE, 0); 3356 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 3357 if ( (uintptr_t) pmap_pvo_table + size > SEGMENT_LENGTH) 3358 panic("pmap_bootstrap: pmap_pvo_table end (%p + %#" _PRIxpa ") > 256MB", 3359 pmap_pvo_table, size); 3360 #endif 3361 3362 for (i = 0; i < pmap_pteg_cnt; i++) 3363 TAILQ_INIT(&pmap_pvo_table[i]); 3364 3365 #ifndef MSGBUFADDR 3366 /* 3367 * Allocate msgbuf in high memory. 3368 */ 3369 msgbuf_paddr = pmap_boot_find_memory(MSGBUFSIZE, PAGE_SIZE, 1); 3370 #endif 3371 3372 for (mp = avail, i = 0; i < avail_cnt; mp++, i++) { 3373 paddr_t pfstart = atop(mp->start); 3374 paddr_t pfend = atop(mp->start + mp->size); 3375 if (mp->size == 0) 3376 continue; 3377 if (mp->start + mp->size <= SEGMENT_LENGTH) { 3378 uvm_page_physload(pfstart, pfend, pfstart, pfend, 3379 VM_FREELIST_FIRST256); 3380 } else if (mp->start >= SEGMENT_LENGTH) { 3381 uvm_page_physload(pfstart, pfend, pfstart, pfend, 3382 VM_FREELIST_DEFAULT); 3383 } else { 3384 pfend = atop(SEGMENT_LENGTH); 3385 uvm_page_physload(pfstart, pfend, pfstart, pfend, 3386 VM_FREELIST_FIRST256); 3387 pfstart = atop(SEGMENT_LENGTH); 3388 pfend = atop(mp->start + mp->size); 3389 uvm_page_physload(pfstart, pfend, pfstart, pfend, 3390 VM_FREELIST_DEFAULT); 3391 } 3392 } 3393 3394 /* 3395 * Make sure kernel vsid is allocated as well as VSID 0. 3396 */ 3397 pmap_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS-1)) / VSID_NBPW] 3398 |= 1 << (KERNEL_VSIDBITS % VSID_NBPW); 3399 pmap_vsid_bitmap[(PHYSMAP_VSIDBITS & (NPMAPS-1)) / VSID_NBPW] 3400 |= 1 << (PHYSMAP_VSIDBITS % VSID_NBPW); 3401 pmap_vsid_bitmap[0] |= 1; 3402 3403 /* 3404 * Initialize kernel pmap and hardware. 
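 * The kernel segment registers are loaded with mtsr/mtsrin and
 * SDR1 is pointed at the hash table allocated above, after which
 * the TLB is invalidated.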
3405 */ 3406 3407 /* PMAP_OEA64_BRIDGE does support these instructions */ 3408 #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE) 3409 for (i = 0; i < 16; i++) { 3410 #if defined(PPC_OEA601) 3411 /* XXX wedges for segment register 0xf , so set later */ 3412 if ((iosrtable[i] & SR601_T) && ((MFPVR() >> 16) == MPC601)) 3413 continue; 3414 #endif 3415 pmap_kernel()->pm_sr[i] = KERNELN_SEGMENT(i)|SR_PRKEY; 3416 __asm volatile ("mtsrin %0,%1" 3417 :: "r"(KERNELN_SEGMENT(i)|SR_PRKEY), "r"(i << ADDR_SR_SHFT)); 3418 } 3419 3420 pmap_kernel()->pm_sr[KERNEL_SR] = KERNEL_SEGMENT|SR_SUKEY|SR_PRKEY; 3421 __asm volatile ("mtsr %0,%1" 3422 :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT)); 3423 #ifdef KERNEL2_SR 3424 pmap_kernel()->pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT|SR_SUKEY|SR_PRKEY; 3425 __asm volatile ("mtsr %0,%1" 3426 :: "n"(KERNEL2_SR), "r"(KERNEL2_SEGMENT)); 3427 #endif 3428 #endif /* PMAP_OEA || PMAP_OEA64_BRIDGE */ 3429 #if defined (PMAP_OEA) 3430 for (i = 0; i < 16; i++) { 3431 if (iosrtable[i] & SR601_T) { 3432 pmap_kernel()->pm_sr[i] = iosrtable[i]; 3433 __asm volatile ("mtsrin %0,%1" 3434 :: "r"(iosrtable[i]), "r"(i << ADDR_SR_SHFT)); 3435 } 3436 } 3437 __asm volatile ("sync; mtsdr1 %0; isync" 3438 :: "r"((uintptr_t)pmap_pteg_table | (pmap_pteg_mask >> 10))); 3439 #elif defined (PMAP_OEA64) || defined (PMAP_OEA64_BRIDGE) 3440 __asm __volatile ("sync; mtsdr1 %0; isync" 3441 :: "r"((uintptr_t)pmap_pteg_table | (32 - __builtin_clz(pmap_pteg_mask >> 11)))); 3442 #endif 3443 tlbia(); 3444 3445 #ifdef ALTIVEC 3446 pmap_use_altivec = cpu_altivec; 3447 #endif 3448 3449 #ifdef DEBUG 3450 if (pmapdebug & PMAPDEBUG_BOOT) { 3451 u_int cnt; 3452 uvm_physseg_t bank; 3453 char pbuf[9]; 3454 for (cnt = 0, bank = uvm_physseg_get_first(); 3455 uvm_physseg_valid_p(bank); 3456 bank = uvm_physseg_get_next(bank)) { 3457 cnt += uvm_physseg_get_avail_end(bank) - 3458 uvm_physseg_get_avail_start(bank); 3459 printf("pmap_bootstrap: vm_physmem[%d]=%#" _PRIxpa "-%#" _PRIxpa "/%#" _PRIxpa "\n", 3460 bank, 3461 ptoa(uvm_physseg_get_avail_start(bank)), 3462 ptoa(uvm_physseg_get_avail_end(bank)), 3463 ptoa(uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank))); 3464 } 3465 format_bytes(pbuf, sizeof(pbuf), ptoa((u_int64_t) cnt)); 3466 printf("pmap_bootstrap: UVM memory = %s (%u pages)\n", 3467 pbuf, cnt); 3468 } 3469 #endif 3470 3471 pool_init(&pmap_upvo_pool, sizeof(struct pvo_entry), 3472 sizeof(struct pvo_entry), 0, 0, "pmap_upvopl", 3473 &pmap_pool_uallocator, IPL_VM); 3474 3475 pool_setlowat(&pmap_upvo_pool, 252); 3476 3477 pool_init(&pmap_pool, sizeof(struct pmap), 3478 sizeof(void *), 0, 0, "pmap_pl", &pmap_pool_uallocator, 3479 IPL_NONE); 3480 3481 #if defined(PMAP_NEED_MAPKERNEL) 3482 { 3483 struct pmap *pm = pmap_kernel(); 3484 #if defined(PMAP_NEED_FULL_MAPKERNEL) 3485 extern int etext[], kernel_text[]; 3486 vaddr_t va, va_etext = (paddr_t) etext; 3487 #endif 3488 paddr_t pa, pa_end; 3489 register_t sr; 3490 struct pte pt; 3491 unsigned int ptegidx; 3492 int bank; 3493 3494 sr = PHYSMAPN_SEGMENT(0) | SR_SUKEY|SR_PRKEY; 3495 pm->pm_sr[0] = sr; 3496 3497 for (bank = 0; bank < vm_nphysseg; bank++) { 3498 pa_end = ptoa(VM_PHYSMEM_PTR(bank)->avail_end); 3499 pa = ptoa(VM_PHYSMEM_PTR(bank)->avail_start); 3500 for (; pa < pa_end; pa += PAGE_SIZE) { 3501 ptegidx = va_to_pteg(pm, pa); 3502 pmap_pte_create(&pt, pm, pa, pa | PTE_M|PTE_BW); 3503 pmap_pte_insert(ptegidx, &pt); 3504 } 3505 } 3506 3507 #if defined(PMAP_NEED_FULL_MAPKERNEL) 3508 va = (vaddr_t) kernel_text; 3509 3510 for (pa = kernelstart; va < va_etext; 
3511 pa += PAGE_SIZE, va += PAGE_SIZE) { 3512 ptegidx = va_to_pteg(pm, va); 3513 pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BR); 3514 pmap_pte_insert(ptegidx, &pt); 3515 } 3516 3517 for (; pa < kernelend; 3518 pa += PAGE_SIZE, va += PAGE_SIZE) { 3519 ptegidx = va_to_pteg(pm, va); 3520 pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BW); 3521 pmap_pte_insert(ptegidx, &pt); 3522 } 3523 3524 for (va = 0, pa = 0; va < kernelstart; 3525 pa += PAGE_SIZE, va += PAGE_SIZE) { 3526 ptegidx = va_to_pteg(pm, va); 3527 if (va < 0x3000) 3528 pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BR); 3529 else 3530 pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BW); 3531 pmap_pte_insert(ptegidx, &pt); 3532 } 3533 for (va = kernelend, pa = kernelend; va < SEGMENT_LENGTH; 3534 pa += PAGE_SIZE, va += PAGE_SIZE) { 3535 ptegidx = va_to_pteg(pm, va); 3536 pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BW); 3537 pmap_pte_insert(ptegidx, &pt); 3538 } 3539 #endif 3540 3541 __asm volatile ("mtsrin %0,%1" 3542 :: "r"(sr), "r"(kernelstart)); 3543 } 3544 #endif 3545 3546 #if defined(PMAPDEBUG) 3547 if ( pmapdebug ) 3548 pmap_print_mmuregs(); 3549 #endif 3550 } 3551