1 /* $NetBSD: pmap.c,v 1.10 2003/05/10 21:10:37 thorpej Exp $ */ 2 /*- 3 * Copyright (c) 2001 The NetBSD Foundation, Inc. 4 * All rights reserved. 5 * 6 * This code is derived from software contributed to The NetBSD Foundation 7 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed by the NetBSD 20 * Foundation, Inc. and its contributors. 21 * 4. Neither the name of The NetBSD Foundation nor the names of its 22 * contributors may be used to endorse or promote products derived 23 * from this software without specific prior written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 35 * POSSIBILITY OF SUCH DAMAGE. 36 */ 37 38 /* 39 * Copyright (C) 1995, 1996 Wolfgang Solfrank. 40 * Copyright (C) 1995, 1996 TooLs GmbH. 41 * All rights reserved. 42 * 43 * Redistribution and use in source and binary forms, with or without 44 * modification, are permitted provided that the following conditions 45 * are met: 46 * 1. Redistributions of source code must retain the above copyright 47 * notice, this list of conditions and the following disclaimer. 48 * 2. Redistributions in binary form must reproduce the above copyright 49 * notice, this list of conditions and the following disclaimer in the 50 * documentation and/or other materials provided with the distribution. 51 * 3. All advertising materials mentioning features or use of this software 52 * must display the following acknowledgement: 53 * This product includes software developed by TooLs GmbH. 54 * 4. The name of TooLs GmbH may not be used to endorse or promote products 55 * derived from this software without specific prior written permission. 56 * 57 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR 58 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 59 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
60 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 61 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 62 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 63 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 64 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 65 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 66 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 67 */ 68 69 #include "opt_altivec.h" 70 #include "opt_pmap.h" 71 #include <sys/param.h> 72 #include <sys/malloc.h> 73 #include <sys/proc.h> 74 #include <sys/user.h> 75 #include <sys/pool.h> 76 #include <sys/queue.h> 77 #include <sys/device.h> /* for evcnt */ 78 #include <sys/systm.h> 79 80 #if __NetBSD_Version__ < 105010000 81 #include <vm/vm.h> 82 #include <vm/vm_kern.h> 83 #define splvm() splimp() 84 #endif 85 86 #include <uvm/uvm.h> 87 88 #include <machine/pcb.h> 89 #include <machine/powerpc.h> 90 #include <powerpc/spr.h> 91 #include <powerpc/oea/sr_601.h> 92 #if __NetBSD_Version__ > 105010000 93 #include <powerpc/oea/bat.h> 94 #else 95 #include <powerpc/bat.h> 96 #endif 97 98 #if defined(DEBUG) || defined(PMAPCHECK) 99 #define STATIC 100 #else 101 #define STATIC static 102 #endif 103 104 #ifdef ALTIVEC 105 int pmap_use_altivec; 106 #endif 107 108 volatile struct pteg *pmap_pteg_table; 109 unsigned int pmap_pteg_cnt; 110 unsigned int pmap_pteg_mask; 111 paddr_t pmap_memlimit = -PAGE_SIZE; /* there is no limit */ 112 113 struct pmap kernel_pmap_; 114 unsigned int pmap_pages_stolen; 115 u_long pmap_pte_valid; 116 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 117 u_long pmap_pvo_enter_depth; 118 u_long pmap_pvo_remove_depth; 119 #endif 120 121 int physmem; 122 #ifndef MSGBUFADDR 123 extern paddr_t msgbuf_paddr; 124 #endif 125 126 static struct mem_region *mem, *avail; 127 static u_int mem_cnt, avail_cnt; 128 129 #ifdef __HAVE_PMAP_PHYSSEG 130 /* 131 * This is a cache of referenced/modified bits. 132 * Bits herein are shifted by ATTRSHFT. 133 */ 134 #define ATTR_SHFT 4 135 struct pmap_physseg pmap_physseg; 136 #endif 137 138 /* 139 * The following structure is exactly 32 bytes long (one cacheline). 140 */ 141 struct pvo_entry { 142 LIST_ENTRY(pvo_entry) pvo_vlink; /* Link to common virt page */ 143 TAILQ_ENTRY(pvo_entry) pvo_olink; /* Link to overflow entry */ 144 struct pte pvo_pte; /* Prebuilt PTE */ 145 pmap_t pvo_pmap; /* ptr to owning pmap */ 146 vaddr_t pvo_vaddr; /* VA of entry */ 147 #define PVO_PTEGIDX_MASK 0x0007 /* which PTEG slot */ 148 #define PVO_PTEGIDX_VALID 0x0008 /* slot is valid */ 149 #define PVO_WIRED 0x0010 /* PVO entry is wired */ 150 #define PVO_MANAGED 0x0020 /* PVO e. for managed page */ 151 #define PVO_EXECUTABLE 0x0040 /* PVO e. 
for executable page */ 152 }; 153 #define PVO_VADDR(pvo) ((pvo)->pvo_vaddr & ~ADDR_POFF) 154 #define PVO_ISEXECUTABLE(pvo) ((pvo)->pvo_vaddr & PVO_EXECUTABLE) 155 #define PVO_PTEGIDX_GET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK) 156 #define PVO_PTEGIDX_ISSET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID) 157 #define PVO_PTEGIDX_CLR(pvo) \ 158 ((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK))) 159 #define PVO_PTEGIDX_SET(pvo,i) \ 160 ((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID)) 161 162 TAILQ_HEAD(pvo_tqhead, pvo_entry); 163 struct pvo_tqhead *pmap_pvo_table; /* pvo entries by ptegroup index */ 164 struct pvo_head pmap_pvo_kunmanaged = LIST_HEAD_INITIALIZER(pmap_pvo_kunmanaged); /* list of unmanaged pages */ 165 struct pvo_head pmap_pvo_unmanaged = LIST_HEAD_INITIALIZER(pmap_pvo_unmanaged); /* list of unmanaged pages */ 166 167 struct pool pmap_pool; /* pool for pmap structures */ 168 struct pool pmap_upvo_pool; /* pool for pvo entries for unmanaged pages */ 169 struct pool pmap_mpvo_pool; /* pool for pvo entries for managed pages */ 170 171 /* 172 * We keep a cache of unmanaged pages to be used for pvo entries for 173 * unmanaged pages. 174 */ 175 struct pvo_page { 176 SIMPLEQ_ENTRY(pvo_page) pvop_link; 177 }; 178 SIMPLEQ_HEAD(pvop_head, pvo_page); 179 struct pvop_head pmap_upvop_head = SIMPLEQ_HEAD_INITIALIZER(pmap_upvop_head); 180 struct pvop_head pmap_mpvop_head = SIMPLEQ_HEAD_INITIALIZER(pmap_mpvop_head); 181 u_long pmap_upvop_free; 182 u_long pmap_upvop_maxfree; 183 u_long pmap_mpvop_free; 184 u_long pmap_mpvop_maxfree; 185 186 STATIC void *pmap_pool_ualloc(struct pool *, int); 187 STATIC void *pmap_pool_malloc(struct pool *, int); 188 189 STATIC void pmap_pool_ufree(struct pool *, void *); 190 STATIC void pmap_pool_mfree(struct pool *, void *); 191 192 static struct pool_allocator pmap_pool_mallocator = { 193 pmap_pool_malloc, pmap_pool_mfree, 0, 194 }; 195 196 static struct pool_allocator pmap_pool_uallocator = { 197 pmap_pool_ualloc, pmap_pool_ufree, 0, 198 }; 199 200 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB) 201 void pmap_pte_print(volatile struct pte *); 202 #endif 203 204 #ifdef DDB 205 void pmap_pteg_check(void); 206 void pmap_pteg_dist(void); 207 void pmap_print_pte(pmap_t, vaddr_t); 208 void pmap_print_mmuregs(void); 209 #endif 210 211 #if defined(DEBUG) || defined(PMAPCHECK) 212 #ifdef PMAPCHECK 213 int pmapcheck = 1; 214 #else 215 int pmapcheck = 0; 216 #endif 217 void pmap_pvo_verify(void); 218 STATIC void pmap_pvo_check(const struct pvo_entry *); 219 #define PMAP_PVO_CHECK(pvo) \ 220 do { \ 221 if (pmapcheck) \ 222 pmap_pvo_check(pvo); \ 223 } while (0) 224 #else 225 #define PMAP_PVO_CHECK(pvo) do { } while (/*CONSTCOND*/0) 226 #endif 227 STATIC int pmap_pte_insert(int, struct pte *); 228 STATIC int pmap_pvo_enter(pmap_t, struct pool *, struct pvo_head *, 229 vaddr_t, paddr_t, register_t, int); 230 STATIC void pmap_pvo_remove(struct pvo_entry *, int); 231 STATIC struct pvo_entry *pmap_pvo_find_va(pmap_t, vaddr_t, int *); 232 STATIC volatile struct pte *pmap_pvo_to_pte(const struct pvo_entry *, int); 233 234 STATIC void tlbia(void); 235 236 STATIC void pmap_release(pmap_t); 237 STATIC void *pmap_boot_find_memory(psize_t, psize_t, int); 238 239 #define VSID_NBPW (sizeof(uint32_t) * 8) 240 static uint32_t pmap_vsid_bitmap[NPMAPS / VSID_NBPW]; 241 242 static int pmap_initialized; 243 244 #if defined(DEBUG) || defined(PMAPDEBUG) 245 #define PMAPDEBUG_BOOT 0x0001 246 #define PMAPDEBUG_PTE 0x0002 247 #define PMAPDEBUG_EXEC 0x0008 248 #define 
PMAPDEBUG_PVOENTER 0x0010 249 #define PMAPDEBUG_PVOREMOVE 0x0020 250 #define PMAPDEBUG_ACTIVATE 0x0100 251 #define PMAPDEBUG_CREATE 0x0200 252 #define PMAPDEBUG_ENTER 0x1000 253 #define PMAPDEBUG_KENTER 0x2000 254 #define PMAPDEBUG_KREMOVE 0x4000 255 #define PMAPDEBUG_REMOVE 0x8000 256 unsigned int pmapdebug = 0; 257 # define DPRINTF(x) printf x 258 # define DPRINTFN(n, x) if (pmapdebug & PMAPDEBUG_ ## n) printf x 259 #else 260 # define DPRINTF(x) 261 # define DPRINTFN(n, x) 262 #endif 263 264 265 #ifdef PMAPCOUNTERS 266 #define PMAPCOUNT(ev) ((pmap_evcnt_ ## ev).ev_count++) 267 #define PMAPCOUNT2(ev) ((ev).ev_count++) 268 269 struct evcnt pmap_evcnt_mappings = 270 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, 271 "pmap", "pages mapped"); 272 struct evcnt pmap_evcnt_unmappings = 273 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_mappings, 274 "pmap", "pages unmapped"); 275 276 struct evcnt pmap_evcnt_kernel_mappings = 277 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, 278 "pmap", "kernel pages mapped"); 279 struct evcnt pmap_evcnt_kernel_unmappings = 280 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_kernel_mappings, 281 "pmap", "kernel pages unmapped"); 282 283 struct evcnt pmap_evcnt_mappings_replaced = 284 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, 285 "pmap", "page mappings replaced"); 286 287 struct evcnt pmap_evcnt_exec_mappings = 288 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_mappings, 289 "pmap", "exec pages mapped"); 290 struct evcnt pmap_evcnt_exec_cached = 291 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_mappings, 292 "pmap", "exec pages cached"); 293 294 struct evcnt pmap_evcnt_exec_synced = 295 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings, 296 "pmap", "exec pages synced"); 297 struct evcnt pmap_evcnt_exec_synced_clear_modify = 298 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings, 299 "pmap", "exec pages synced (CM)"); 300 301 struct evcnt pmap_evcnt_exec_uncached_page_protect = 302 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings, 303 "pmap", "exec pages uncached (PP)"); 304 struct evcnt pmap_evcnt_exec_uncached_clear_modify = 305 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings, 306 "pmap", "exec pages uncached (CM)"); 307 struct evcnt pmap_evcnt_exec_uncached_zero_page = 308 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings, 309 "pmap", "exec pages uncached (ZP)"); 310 struct evcnt pmap_evcnt_exec_uncached_copy_page = 311 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings, 312 "pmap", "exec pages uncached (CP)"); 313 314 struct evcnt pmap_evcnt_updates = 315 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, 316 "pmap", "updates"); 317 struct evcnt pmap_evcnt_collects = 318 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, 319 "pmap", "collects"); 320 struct evcnt pmap_evcnt_copies = 321 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, 322 "pmap", "copies"); 323 324 struct evcnt pmap_evcnt_ptes_spilled = 325 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, 326 "pmap", "ptes spilled from overflow"); 327 struct evcnt pmap_evcnt_ptes_unspilled = 328 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, 329 "pmap", "ptes not spilled"); 330 struct evcnt pmap_evcnt_ptes_evicted = 331 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, 332 "pmap", "ptes evicted"); 333 334 struct evcnt pmap_evcnt_ptes_primary[8] = { 335 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, 336 "pmap", "ptes added at primary[0]"), 337 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, 338 "pmap", "ptes added at primary[1]"), 339 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, 340 "pmap", "ptes added at primary[2]"), 341 
EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, 342 "pmap", "ptes added at primary[3]"), 343 344 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, 345 "pmap", "ptes added at primary[4]"), 346 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, 347 "pmap", "ptes added at primary[5]"), 348 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, 349 "pmap", "ptes added at primary[6]"), 350 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, 351 "pmap", "ptes added at primary[7]"), 352 }; 353 struct evcnt pmap_evcnt_ptes_secondary[8] = { 354 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, 355 "pmap", "ptes added at secondary[0]"), 356 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, 357 "pmap", "ptes added at secondary[1]"), 358 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, 359 "pmap", "ptes added at secondary[2]"), 360 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, 361 "pmap", "ptes added at secondary[3]"), 362 363 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, 364 "pmap", "ptes added at secondary[4]"), 365 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, 366 "pmap", "ptes added at secondary[5]"), 367 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, 368 "pmap", "ptes added at secondary[6]"), 369 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, 370 "pmap", "ptes added at secondary[7]"), 371 }; 372 struct evcnt pmap_evcnt_ptes_removed = 373 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, 374 "pmap", "ptes removed"); 375 struct evcnt pmap_evcnt_ptes_changed = 376 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, 377 "pmap", "ptes changed"); 378 379 /* 380 * From pmap_subr.c 381 */ 382 extern struct evcnt pmap_evcnt_zeroed_pages; 383 extern struct evcnt pmap_evcnt_copied_pages; 384 extern struct evcnt pmap_evcnt_idlezeroed_pages; 385 #else 386 #define PMAPCOUNT(ev) ((void) 0) 387 #define PMAPCOUNT2(ev) ((void) 0) 388 #endif 389 390 #define TLBIE(va) __asm __volatile("tlbie %0" :: "r"(va)) 391 #define TLBSYNC() __asm __volatile("tlbsync") 392 #define SYNC() __asm __volatile("sync") 393 #define EIEIO() __asm __volatile("eieio") 394 #define MFMSR() mfmsr() 395 #define MTMSR(psl) mtmsr(psl) 396 #define MFPVR() mfpvr() 397 #define MFSRIN(va) mfsrin(va) 398 #define MFTB() mfrtcltbl() 399 400 static __inline register_t 401 mfsrin(vaddr_t va) 402 { 403 register_t sr; 404 __asm __volatile ("mfsrin %0,%1" : "=r"(sr) : "r"(va)); 405 return sr; 406 } 407 408 static __inline register_t 409 pmap_interrupts_off(void) 410 { 411 register_t msr = MFMSR(); 412 if (msr & PSL_EE) 413 MTMSR(msr & ~PSL_EE); 414 return msr; 415 } 416 417 static void 418 pmap_interrupts_restore(register_t msr) 419 { 420 if (msr & PSL_EE) 421 MTMSR(msr); 422 } 423 424 static __inline u_int32_t 425 mfrtcltbl(void) 426 { 427 428 if ((MFPVR() >> 16) == MPC601) 429 return (mfrtcl() >> 7); 430 else 431 return (mftbl()); 432 } 433 434 /* 435 * These small routines may have to be replaced, 436 * if/when we support processors other that the 604. 437 */ 438 439 void 440 tlbia(void) 441 { 442 caddr_t i; 443 444 SYNC(); 445 /* 446 * Why not use "tlbia"? Because not all processors implement it. 447 * 448 * This needs to be a per-cpu callback to do the appropriate thing 449 * for the CPU. 
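	 *
	 * The loop below issues a tlbie for one page in each 4KB step of a
	 * 256KB window (64 iterations).  On the 603/604-class MMUs this
	 * file currently targets, the TLBs are indexed by those low-order
	 * EA bits, so invalidating one page per congruence class is
	 * believed sufficient to flush every entry.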
XXX 450 */ 451 for (i = 0; i < (caddr_t)0x00040000; i += 0x00001000) { 452 TLBIE(i); 453 EIEIO(); 454 SYNC(); 455 } 456 TLBSYNC(); 457 SYNC(); 458 } 459 460 static __inline register_t 461 va_to_vsid(const struct pmap *pm, vaddr_t addr) 462 { 463 return (pm->pm_sr[addr >> ADDR_SR_SHFT] & SR_VSID); 464 } 465 466 static __inline register_t 467 va_to_pteg(const struct pmap *pm, vaddr_t addr) 468 { 469 register_t hash; 470 471 hash = va_to_vsid(pm, addr) ^ ((addr & ADDR_PIDX) >> ADDR_PIDX_SHFT); 472 return hash & pmap_pteg_mask; 473 } 474 475 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB) 476 /* 477 * Given a PTE in the page table, calculate the VADDR that hashes to it. 478 * The only bit of magic is that the top 4 bits of the address doesn't 479 * technically exist in the PTE. But we know we reserved 4 bits of the 480 * VSID for it so that's how we get it. 481 */ 482 static vaddr_t 483 pmap_pte_to_va(volatile const struct pte *pt) 484 { 485 vaddr_t va; 486 uintptr_t ptaddr = (uintptr_t) pt; 487 488 if (pt->pte_hi & PTE_HID) 489 ptaddr ^= (pmap_pteg_mask * sizeof(struct pteg)); 490 491 /* PPC Bits 10-19 */ 492 va = ((pt->pte_hi >> PTE_VSID_SHFT) ^ (ptaddr / sizeof(struct pteg))) & 0x3ff; 493 va <<= ADDR_PIDX_SHFT; 494 495 /* PPC Bits 4-9 */ 496 va |= (pt->pte_hi & PTE_API) << ADDR_API_SHFT; 497 498 /* PPC Bits 0-3 */ 499 va |= VSID_TO_SR(pt->pte_hi >> PTE_VSID_SHFT) << ADDR_SR_SHFT; 500 501 return va; 502 } 503 #endif 504 505 static __inline struct pvo_head * 506 pa_to_pvoh(paddr_t pa, struct vm_page **pg_p) 507 { 508 #ifdef __HAVE_VM_PAGE_MD 509 struct vm_page *pg; 510 511 pg = PHYS_TO_VM_PAGE(pa); 512 if (pg_p != NULL) 513 *pg_p = pg; 514 if (pg == NULL) 515 return &pmap_pvo_unmanaged; 516 return &pg->mdpage.mdpg_pvoh; 517 #endif 518 #ifdef __HAVE_PMAP_PHYSSEG 519 int bank, pg; 520 521 bank = vm_physseg_find(atop(pa), &pg); 522 if (pg_p != NULL) 523 *pg_p = pg; 524 if (bank == -1) 525 return &pmap_pvo_unmanaged; 526 return &vm_physmem[bank].pmseg.pvoh[pg]; 527 #endif 528 } 529 530 static __inline struct pvo_head * 531 vm_page_to_pvoh(struct vm_page *pg) 532 { 533 #ifdef __HAVE_VM_PAGE_MD 534 return &pg->mdpage.mdpg_pvoh; 535 #endif 536 #ifdef __HAVE_PMAP_PHYSSEG 537 return pa_to_pvoh(VM_PAGE_TO_PHYS(pg), NULL); 538 #endif 539 } 540 541 542 #ifdef __HAVE_PMAP_PHYSSEG 543 static __inline char * 544 pa_to_attr(paddr_t pa) 545 { 546 int bank, pg; 547 548 bank = vm_physseg_find(atop(pa), &pg); 549 if (bank == -1) 550 return NULL; 551 return &vm_physmem[bank].pmseg.attrs[pg]; 552 } 553 #endif 554 555 static __inline void 556 pmap_attr_clear(struct vm_page *pg, int ptebit) 557 { 558 #ifdef __HAVE_PMAP_PHYSSEG 559 *pa_to_attr(VM_PAGE_TO_PHYS(pg)) &= ~(ptebit >> ATTR_SHFT); 560 #endif 561 #ifdef __HAVE_VM_PAGE_MD 562 pg->mdpage.mdpg_attrs &= ~ptebit; 563 #endif 564 } 565 566 static __inline int 567 pmap_attr_fetch(struct vm_page *pg) 568 { 569 #ifdef __HAVE_PMAP_PHYSSEG 570 return *pa_to_attr(VM_PAGE_TO_PHYS(pg)) << ATTR_SHFT; 571 #endif 572 #ifdef __HAVE_VM_PAGE_MD 573 return pg->mdpage.mdpg_attrs; 574 #endif 575 } 576 577 static __inline void 578 pmap_attr_save(struct vm_page *pg, int ptebit) 579 { 580 #ifdef __HAVE_PMAP_PHYSSEG 581 *pa_to_attr(VM_PAGE_TO_PHYS(pg)) |= (ptebit >> ATTR_SHFT); 582 #endif 583 #ifdef __HAVE_VM_PAGE_MD 584 pg->mdpage.mdpg_attrs |= ptebit; 585 #endif 586 } 587 588 static __inline int 589 pmap_pte_compare(const volatile struct pte *pt, const struct pte *pvo_pt) 590 { 591 if (pt->pte_hi == pvo_pt->pte_hi 592 #if 0 593 && ((pt->pte_lo ^ pvo_pt->pte_lo) & 594 
~(PTE_REF|PTE_CHG)) == 0 595 #endif 596 ) 597 return 1; 598 return 0; 599 } 600 601 static __inline void 602 pmap_pte_create(struct pte *pt, const struct pmap *pm, vaddr_t va, register_t pte_lo) 603 { 604 /* 605 * Construct the PTE. Default to IMB initially. Valid bit 606 * only gets set when the real pte is set in memory. 607 * 608 * Note: Don't set the valid bit for correct operation of tlb update. 609 */ 610 pt->pte_hi = (va_to_vsid(pm, va) << PTE_VSID_SHFT) 611 | (((va & ADDR_PIDX) >> (ADDR_API_SHFT - PTE_API_SHFT)) & PTE_API); 612 pt->pte_lo = pte_lo; 613 } 614 615 static __inline void 616 pmap_pte_synch(volatile struct pte *pt, struct pte *pvo_pt) 617 { 618 pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF|PTE_CHG); 619 } 620 621 static __inline void 622 pmap_pte_clear(volatile struct pte *pt, vaddr_t va, int ptebit) 623 { 624 /* 625 * As shown in Section 7.6.3.2.3 626 */ 627 pt->pte_lo &= ~ptebit; 628 TLBIE(va); 629 SYNC(); 630 EIEIO(); 631 TLBSYNC(); 632 SYNC(); 633 } 634 635 static __inline void 636 pmap_pte_set(volatile struct pte *pt, struct pte *pvo_pt) 637 { 638 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 639 if (pvo_pt->pte_hi & PTE_VALID) 640 panic("pte_set: setting an already valid pte %p", pvo_pt); 641 #endif 642 pvo_pt->pte_hi |= PTE_VALID; 643 /* 644 * Update the PTE as defined in section 7.6.3.1 645 * Note that the REF/CHG bits are from pvo_pt and thus should 646 * have been saved so this routine can restore them (if desired). 647 */ 648 pt->pte_lo = pvo_pt->pte_lo; 649 EIEIO(); 650 pt->pte_hi = pvo_pt->pte_hi; 651 SYNC(); 652 pmap_pte_valid++; 653 } 654 655 static __inline void 656 pmap_pte_unset(volatile struct pte *pt, struct pte *pvo_pt, vaddr_t va) 657 { 658 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 659 if ((pvo_pt->pte_hi & PTE_VALID) == 0) 660 panic("pte_unset: attempt to unset an inactive pte#1 %p/%p", pvo_pt, pt); 661 if ((pt->pte_hi & PTE_VALID) == 0) 662 panic("pte_unset: attempt to unset an inactive pte#2 %p/%p", pvo_pt, pt); 663 #endif 664 665 pvo_pt->pte_hi &= ~PTE_VALID; 666 /* 667 * Force the ref & chg bits back into the PTEs. 668 */ 669 SYNC(); 670 /* 671 * Invalidate the pte ... (Section 7.6.3.3) 672 */ 673 pt->pte_hi &= ~PTE_VALID; 674 SYNC(); 675 TLBIE(va); 676 SYNC(); 677 EIEIO(); 678 TLBSYNC(); 679 SYNC(); 680 /* 681 * Save the ref & chg bits ... 682 */ 683 pmap_pte_synch(pt, pvo_pt); 684 pmap_pte_valid--; 685 } 686 687 static __inline void 688 pmap_pte_change(volatile struct pte *pt, struct pte *pvo_pt, vaddr_t va) 689 { 690 /* 691 * Invalidate the PTE 692 */ 693 pmap_pte_unset(pt, pvo_pt, va); 694 pmap_pte_set(pt, pvo_pt); 695 } 696 697 /* 698 * Try to insert the PTE @ *pvo_pt into the pmap_pteg_table at ptegidx 699 * (either primary or secondary location). 700 * 701 * Note: both the destination and source PTEs must not have PTE_VALID set. 702 */ 703 704 STATIC int 705 pmap_pte_insert(int ptegidx, struct pte *pvo_pt) 706 { 707 volatile struct pte *pt; 708 int i; 709 710 #if defined(DEBUG) 711 DPRINTFN(PTE, ("pmap_pte_insert: idx 0x%x, pte 0x%lx 0x%lx\n", 712 ptegidx, pvo_pt->pte_hi, pvo_pt->pte_lo)); 713 #endif 714 /* 715 * First try primary hash. 716 */ 717 for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) { 718 if ((pt->pte_hi & PTE_VALID) == 0) { 719 pvo_pt->pte_hi &= ~PTE_HID; 720 pmap_pte_set(pt, pvo_pt); 721 return i; 722 } 723 } 724 725 /* 726 * Now try secondary hash. 
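	 * The secondary PTEG is simply the complement of the primary hash
	 * under pmap_pteg_mask (the ptegidx ^= pmap_pteg_mask below), and
	 * PTE_HID is set in pte_hi so the entry can later be recognized as
	 * living in its secondary group.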
727 */ 728 ptegidx ^= pmap_pteg_mask; 729 for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) { 730 if ((pt->pte_hi & PTE_VALID) == 0) { 731 pvo_pt->pte_hi |= PTE_HID; 732 pmap_pte_set(pt, pvo_pt); 733 return i; 734 } 735 } 736 return -1; 737 } 738 739 /* 740 * Spill handler. 741 * 742 * Tries to spill a page table entry from the overflow area. 743 * This runs in either real mode (if dealing with a exception spill) 744 * or virtual mode when dealing with manually spilling one of the 745 * kernel's pte entries. In either case, interrupts are already 746 * disabled. 747 */ 748 int 749 pmap_pte_spill(struct pmap *pm, vaddr_t addr) 750 { 751 struct pvo_entry *source_pvo, *victim_pvo, *next_pvo; 752 struct pvo_entry *pvo; 753 struct pvo_tqhead *pvoh, *vpvoh; 754 int ptegidx, i, j; 755 volatile struct pteg *pteg; 756 volatile struct pte *pt; 757 758 ptegidx = va_to_pteg(pm, addr); 759 760 /* 761 * Have to substitute some entry. Use the primary hash for this. 762 * 763 * Use low bits of timebase as random generator 764 */ 765 pteg = &pmap_pteg_table[ptegidx]; 766 i = MFTB() & 7; 767 pt = &pteg->pt[i]; 768 769 source_pvo = NULL; 770 victim_pvo = NULL; 771 pvoh = &pmap_pvo_table[ptegidx]; 772 TAILQ_FOREACH(pvo, pvoh, pvo_olink) { 773 774 /* 775 * We need to find pvo entry for this address... 776 */ 777 PMAP_PVO_CHECK(pvo); /* sanity check */ 778 779 /* 780 * If we haven't found the source and we come to a PVO with 781 * a valid PTE, then we know we can't find it because all 782 * evicted PVOs always are first in the list. 783 */ 784 if (source_pvo == NULL && (pvo->pvo_pte.pte_hi & PTE_VALID)) 785 break; 786 if (source_pvo == NULL && pm == pvo->pvo_pmap && 787 addr == PVO_VADDR(pvo)) { 788 789 /* 790 * Now we have found the entry to be spilled into the 791 * pteg. Attempt to insert it into the page table. 792 */ 793 j = pmap_pte_insert(ptegidx, &pvo->pvo_pte); 794 if (j >= 0) { 795 PVO_PTEGIDX_SET(pvo, j); 796 PMAP_PVO_CHECK(pvo); /* sanity check */ 797 pvo->pvo_pmap->pm_evictions--; 798 PMAPCOUNT(ptes_spilled); 799 PMAPCOUNT2(((pvo->pvo_pte.pte_hi & PTE_HID) 800 ? pmap_evcnt_ptes_secondary 801 : pmap_evcnt_ptes_primary)[j]); 802 803 /* 804 * Since we keep the evicted entries at the 805 * from of the PVO list, we need move this 806 * (now resident) PVO after the evicted 807 * entries. 808 */ 809 next_pvo = TAILQ_NEXT(pvo, pvo_olink); 810 811 /* 812 * If we don't have to move (either we were the 813 * last entry or the next entry was valid), 814 * don't change our position. Otherwise 815 * move ourselves to the tail of the queue. 816 */ 817 if (next_pvo != NULL && 818 !(next_pvo->pvo_pte.pte_hi & PTE_VALID)) { 819 TAILQ_REMOVE(pvoh, pvo, pvo_olink); 820 TAILQ_INSERT_TAIL(pvoh, pvo, pvo_olink); 821 } 822 return 1; 823 } 824 source_pvo = pvo; 825 if (victim_pvo != NULL) 826 break; 827 } 828 829 /* 830 * We also need the pvo entry of the victim we are replacing 831 * so save the R & C bits of the PTE. 832 */ 833 if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL && 834 pmap_pte_compare(pt, &pvo->pvo_pte)) { 835 vpvoh = pvoh; 836 victim_pvo = pvo; 837 if (source_pvo != NULL) 838 break; 839 } 840 } 841 842 if (source_pvo == NULL) { 843 PMAPCOUNT(ptes_unspilled); 844 return 0; 845 } 846 847 if (victim_pvo == NULL) { 848 if ((pt->pte_hi & PTE_HID) == 0) 849 panic("pmap_pte_spill: victim p-pte (%p) has " 850 "no pvo entry!", pt); 851 852 /* 853 * If this is a secondary PTE, we need to search 854 * its primary pvo bucket for the matching PVO. 
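		 * PVOs are always chained on the pvo_olink list of their
		 * primary PTEG, so a PTE found in this group via the
		 * secondary hash has its PVO on the list of the
		 * complementary group (ptegidx ^ pmap_pteg_mask below).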
		 */
		vpvoh = &pmap_pvo_table[ptegidx ^ pmap_pteg_mask];
		TAILQ_FOREACH(pvo, vpvoh, pvo_olink) {
			PMAP_PVO_CHECK(pvo);		/* sanity check */

			/*
			 * We also need the pvo entry of the victim we are
			 * replacing, so save the R & C bits of the PTE.
			 */
			if (pmap_pte_compare(pt, &pvo->pvo_pte)) {
				victim_pvo = pvo;
				break;
			}
		}
		if (victim_pvo == NULL)
			panic("pmap_pte_spill: victim s-pte (%p) has "
			    "no pvo entry!", pt);
	}

	/*
	 * We are invalidating the TLB entry for the EA of the PTE we are
	 * replacing even though it's valid; if we don't, we lose any
	 * ref/chg bit changes contained in the TLB entry.
	 */
	source_pvo->pvo_pte.pte_hi &= ~PTE_HID;

	/*
	 * To enforce the PVO list ordering constraint that all
	 * evicted entries should come before all valid entries,
	 * move the source PVO to the tail of its list and the
	 * victim PVO to the head of its list (which might not be
	 * the same list, if the victim was using the secondary hash).
	 */
	TAILQ_REMOVE(pvoh, source_pvo, pvo_olink);
	TAILQ_INSERT_TAIL(pvoh, source_pvo, pvo_olink);
	TAILQ_REMOVE(vpvoh, victim_pvo, pvo_olink);
	TAILQ_INSERT_HEAD(vpvoh, victim_pvo, pvo_olink);
	pmap_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr);
	pmap_pte_set(pt, &source_pvo->pvo_pte);
	victim_pvo->pvo_pmap->pm_evictions++;
	source_pvo->pvo_pmap->pm_evictions--;

	PVO_PTEGIDX_CLR(victim_pvo);
	PVO_PTEGIDX_SET(source_pvo, i);
	PMAPCOUNT2(pmap_evcnt_ptes_primary[i]);
	PMAPCOUNT(ptes_spilled);
	PMAPCOUNT(ptes_evicted);
	PMAPCOUNT(ptes_removed);

	PMAP_PVO_CHECK(victim_pvo);
	PMAP_PVO_CHECK(source_pvo);
	return 1;
}

/*
 * Restrict the given range to physical memory.
 */
void
pmap_real_memory(paddr_t *start, psize_t *size)
{
	struct mem_region *mp;

	for (mp = mem; mp->size; mp++) {
		if (*start + *size > mp->start
		    && *start < mp->start + mp->size) {
			if (*start < mp->start) {
				*size -= mp->start - *start;
				*start = mp->start;
			}
			if (*start + *size > mp->start + mp->size)
				*size = mp->start + mp->size - *start;
			return;
		}
	}
	*size = 0;
}

/*
 * Initialize anything else for pmap handling.
 * Called during vm_init().
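 */

/*
 * Minimal usage sketch for pmap_real_memory() above (illustration only,
 * not part of the original file; the helper name is hypothetical): clip
 * a caller-supplied physical window to the portion backed by a mem[]
 * region.
 */
#if 0
static psize_t
pmap_clip_to_ram(paddr_t *startp, psize_t *sizep)
{
	/* On return, *sizep is 0 if the window overlaps no physical memory. */
	pmap_real_memory(startp, sizep);
	return *sizep;
}
#endif

/*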
936 */ 937 void 938 pmap_init(void) 939 { 940 int s; 941 #ifdef __HAVE_PMAP_PHYSSEG 942 struct pvo_tqhead *pvoh; 943 int bank; 944 long sz; 945 char *attr; 946 947 s = splvm(); 948 pvoh = pmap_physseg.pvoh; 949 attr = pmap_physseg.attrs; 950 for (bank = 0; bank < vm_nphysseg; bank++) { 951 sz = vm_physmem[bank].end - vm_physmem[bank].start; 952 vm_physmem[bank].pmseg.pvoh = pvoh; 953 vm_physmem[bank].pmseg.attrs = attr; 954 for (; sz > 0; sz--, pvoh++, attr++) { 955 TAILQ_INIT(pvoh); 956 *attr = 0; 957 } 958 } 959 splx(s); 960 #endif 961 962 s = splvm(); 963 pool_init(&pmap_mpvo_pool, sizeof(struct pvo_entry), 964 sizeof(struct pvo_entry), 0, 0, "pmap_mpvopl", 965 &pmap_pool_mallocator); 966 967 pool_setlowat(&pmap_mpvo_pool, 1008); 968 969 pmap_initialized = 1; 970 splx(s); 971 972 #ifdef PMAPCOUNTERS 973 evcnt_attach_static(&pmap_evcnt_mappings); 974 evcnt_attach_static(&pmap_evcnt_mappings_replaced); 975 evcnt_attach_static(&pmap_evcnt_unmappings); 976 977 evcnt_attach_static(&pmap_evcnt_kernel_mappings); 978 evcnt_attach_static(&pmap_evcnt_kernel_unmappings); 979 980 evcnt_attach_static(&pmap_evcnt_exec_mappings); 981 evcnt_attach_static(&pmap_evcnt_exec_cached); 982 evcnt_attach_static(&pmap_evcnt_exec_synced); 983 evcnt_attach_static(&pmap_evcnt_exec_synced_clear_modify); 984 985 evcnt_attach_static(&pmap_evcnt_exec_uncached_page_protect); 986 evcnt_attach_static(&pmap_evcnt_exec_uncached_clear_modify); 987 evcnt_attach_static(&pmap_evcnt_exec_uncached_zero_page); 988 evcnt_attach_static(&pmap_evcnt_exec_uncached_copy_page); 989 990 evcnt_attach_static(&pmap_evcnt_zeroed_pages); 991 evcnt_attach_static(&pmap_evcnt_copied_pages); 992 evcnt_attach_static(&pmap_evcnt_idlezeroed_pages); 993 994 evcnt_attach_static(&pmap_evcnt_updates); 995 evcnt_attach_static(&pmap_evcnt_collects); 996 evcnt_attach_static(&pmap_evcnt_copies); 997 998 evcnt_attach_static(&pmap_evcnt_ptes_spilled); 999 evcnt_attach_static(&pmap_evcnt_ptes_unspilled); 1000 evcnt_attach_static(&pmap_evcnt_ptes_evicted); 1001 evcnt_attach_static(&pmap_evcnt_ptes_removed); 1002 evcnt_attach_static(&pmap_evcnt_ptes_changed); 1003 evcnt_attach_static(&pmap_evcnt_ptes_primary[0]); 1004 evcnt_attach_static(&pmap_evcnt_ptes_primary[1]); 1005 evcnt_attach_static(&pmap_evcnt_ptes_primary[2]); 1006 evcnt_attach_static(&pmap_evcnt_ptes_primary[3]); 1007 evcnt_attach_static(&pmap_evcnt_ptes_primary[4]); 1008 evcnt_attach_static(&pmap_evcnt_ptes_primary[5]); 1009 evcnt_attach_static(&pmap_evcnt_ptes_primary[6]); 1010 evcnt_attach_static(&pmap_evcnt_ptes_primary[7]); 1011 evcnt_attach_static(&pmap_evcnt_ptes_secondary[0]); 1012 evcnt_attach_static(&pmap_evcnt_ptes_secondary[1]); 1013 evcnt_attach_static(&pmap_evcnt_ptes_secondary[2]); 1014 evcnt_attach_static(&pmap_evcnt_ptes_secondary[3]); 1015 evcnt_attach_static(&pmap_evcnt_ptes_secondary[4]); 1016 evcnt_attach_static(&pmap_evcnt_ptes_secondary[5]); 1017 evcnt_attach_static(&pmap_evcnt_ptes_secondary[6]); 1018 evcnt_attach_static(&pmap_evcnt_ptes_secondary[7]); 1019 #endif 1020 } 1021 1022 /* 1023 * How much virtual space does the kernel get? 1024 */ 1025 void 1026 pmap_virtual_space(vaddr_t *start, vaddr_t *end) 1027 { 1028 /* 1029 * For now, reserve one segment (minus some overhead) for kernel 1030 * virtual memory 1031 */ 1032 *start = VM_MIN_KERNEL_ADDRESS; 1033 *end = VM_MAX_KERNEL_ADDRESS; 1034 } 1035 1036 /* 1037 * Allocate, initialize, and return a new physical map. 
1038 */ 1039 pmap_t 1040 pmap_create(void) 1041 { 1042 pmap_t pm; 1043 1044 pm = pool_get(&pmap_pool, PR_WAITOK); 1045 memset((caddr_t)pm, 0, sizeof *pm); 1046 pmap_pinit(pm); 1047 1048 DPRINTFN(CREATE,("pmap_create: pm %p:\n" 1049 "\t%06lx %06lx %06lx %06lx %06lx %06lx %06lx %06lx\n" 1050 "\t%06lx %06lx %06lx %06lx %06lx %06lx %06lx %06lx\n", pm, 1051 pm->pm_sr[0], pm->pm_sr[1], pm->pm_sr[2], pm->pm_sr[3], 1052 pm->pm_sr[4], pm->pm_sr[5], pm->pm_sr[6], pm->pm_sr[7], 1053 pm->pm_sr[8], pm->pm_sr[9], pm->pm_sr[10], pm->pm_sr[11], 1054 pm->pm_sr[12], pm->pm_sr[13], pm->pm_sr[14], pm->pm_sr[15])); 1055 return pm; 1056 } 1057 1058 /* 1059 * Initialize a preallocated and zeroed pmap structure. 1060 */ 1061 void 1062 pmap_pinit(pmap_t pm) 1063 { 1064 register_t entropy = MFTB(); 1065 register_t mask; 1066 int i; 1067 1068 /* 1069 * Allocate some segment registers for this pmap. 1070 */ 1071 pm->pm_refs = 1; 1072 for (i = 0; i < NPMAPS; i += VSID_NBPW) { 1073 static register_t pmap_vsidcontext; 1074 register_t hash; 1075 unsigned int n; 1076 1077 /* Create a new value by multiplying by a prime adding in 1078 * entropy from the timebase register. This is to make the 1079 * VSID more random so that the PT Hash function collides 1080 * less often. (note that the prime causes gcc to do shifts 1081 * instead of a multiply) 1082 */ 1083 pmap_vsidcontext = (pmap_vsidcontext * 0x1105) + entropy; 1084 hash = pmap_vsidcontext & (NPMAPS - 1); 1085 if (hash == 0) /* 0 is special, avoid it */ 1086 continue; 1087 n = hash >> 5; 1088 mask = 1L << (hash & (VSID_NBPW-1)); 1089 hash = pmap_vsidcontext; 1090 if (pmap_vsid_bitmap[n] & mask) { /* collision? */ 1091 /* anything free in this bucket? */ 1092 if (~pmap_vsid_bitmap[n] == 0) { 1093 entropy = hash >> PTE_VSID_SHFT; 1094 continue; 1095 } 1096 i = ffs(~pmap_vsid_bitmap[n]) - 1; 1097 mask = 1L << i; 1098 hash &= ~(VSID_NBPW-1); 1099 hash |= i; 1100 } 1101 /* 1102 * Make sure clear out SR_KEY_LEN bits because we put our 1103 * our data in those bits (to identify the segment). 1104 */ 1105 hash &= PTE_VSID >> (PTE_VSID_SHFT + SR_KEY_LEN); 1106 pmap_vsid_bitmap[n] |= mask; 1107 for (i = 0; i < 16; i++) 1108 pm->pm_sr[i] = VSID_MAKE(i, hash) | SR_PRKEY; 1109 return; 1110 } 1111 panic("pmap_pinit: out of segments"); 1112 } 1113 1114 /* 1115 * Add a reference to the given pmap. 1116 */ 1117 void 1118 pmap_reference(pmap_t pm) 1119 { 1120 pm->pm_refs++; 1121 } 1122 1123 /* 1124 * Retire the given pmap from service. 1125 * Should only be called if the map contains no valid mappings. 1126 */ 1127 void 1128 pmap_destroy(pmap_t pm) 1129 { 1130 if (--pm->pm_refs == 0) { 1131 pmap_release(pm); 1132 pool_put(&pmap_pool, pm); 1133 } 1134 } 1135 1136 /* 1137 * Release any resources held by the given physical map. 1138 * Called when a pmap initialized by pmap_pinit is being released. 1139 */ 1140 void 1141 pmap_release(pmap_t pm) 1142 { 1143 int idx, mask; 1144 1145 if (pm->pm_sr[0] == 0) 1146 panic("pmap_release"); 1147 idx = VSID_TO_HASH(pm->pm_sr[0]) & (NPMAPS-1); 1148 mask = 1 << (idx % VSID_NBPW); 1149 idx /= VSID_NBPW; 1150 pmap_vsid_bitmap[idx] &= ~mask; 1151 } 1152 1153 /* 1154 * Copy the range specified by src_addr/len 1155 * from the source map to the range dst_addr/len 1156 * in the destination map. 1157 * 1158 * This routine is only advisory and need not do anything. 
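 */

/*
 * Illustrative sketch (not part of the original file; the helper name is
 * hypothetical): the VSID handed out in pmap_pinit() above is tracked as
 * one bit in pmap_vsid_bitmap[], using the same word/bit split that
 * pmap_release() undoes.  Testing that bit would look like this.
 */
#if 0
static int
pmap_vsid_allocated_p(const struct pmap *pm)
{
	unsigned int idx = VSID_TO_HASH(pm->pm_sr[0]) & (NPMAPS - 1);
	unsigned int mask = 1 << (idx % VSID_NBPW);

	return (pmap_vsid_bitmap[idx / VSID_NBPW] & mask) != 0;
}
#endif

/*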
1159 */ 1160 void 1161 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr, 1162 vsize_t len, vaddr_t src_addr) 1163 { 1164 PMAPCOUNT(copies); 1165 } 1166 1167 /* 1168 * Require that all active physical maps contain no 1169 * incorrect entries NOW. 1170 */ 1171 void 1172 pmap_update(struct pmap *pmap) 1173 { 1174 PMAPCOUNT(updates); 1175 TLBSYNC(); 1176 } 1177 1178 /* 1179 * Garbage collects the physical map system for 1180 * pages which are no longer used. 1181 * Success need not be guaranteed -- that is, there 1182 * may well be pages which are not referenced, but 1183 * others may be collected. 1184 * Called by the pageout daemon when pages are scarce. 1185 */ 1186 void 1187 pmap_collect(pmap_t pm) 1188 { 1189 PMAPCOUNT(collects); 1190 } 1191 1192 static __inline int 1193 pmap_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx) 1194 { 1195 int pteidx; 1196 /* 1197 * We can find the actual pte entry without searching by 1198 * grabbing the PTEG index from 3 unused bits in pte_lo[11:9] 1199 * and by noticing the HID bit. 1200 */ 1201 pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo); 1202 if (pvo->pvo_pte.pte_hi & PTE_HID) 1203 pteidx ^= pmap_pteg_mask * 8; 1204 return pteidx; 1205 } 1206 1207 volatile struct pte * 1208 pmap_pvo_to_pte(const struct pvo_entry *pvo, int pteidx) 1209 { 1210 volatile struct pte *pt; 1211 1212 #if !defined(DIAGNOSTIC) && !defined(DEBUG) && !defined(PMAPCHECK) 1213 if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) 1214 return NULL; 1215 #endif 1216 1217 /* 1218 * If we haven't been supplied the ptegidx, calculate it. 1219 */ 1220 if (pteidx == -1) { 1221 int ptegidx; 1222 ptegidx = va_to_pteg(pvo->pvo_pmap, pvo->pvo_vaddr); 1223 pteidx = pmap_pvo_pte_index(pvo, ptegidx); 1224 } 1225 1226 pt = &pmap_pteg_table[pteidx >> 3].pt[pteidx & 7]; 1227 1228 #if !defined(DIAGNOSTIC) && !defined(DEBUG) && !defined(PMAPCHECK) 1229 return pt; 1230 #else 1231 if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) { 1232 panic("pmap_pvo_to_pte: pvo %p: has valid pte in " 1233 "pvo but no valid pte index", pvo); 1234 } 1235 if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) { 1236 panic("pmap_pvo_to_pte: pvo %p: has valid pte index in " 1237 "pvo but no valid pte", pvo); 1238 } 1239 1240 if ((pt->pte_hi ^ (pvo->pvo_pte.pte_hi & ~PTE_VALID)) == PTE_VALID) { 1241 if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) { 1242 #if defined(DEBUG) || defined(PMAPCHECK) 1243 pmap_pte_print(pt); 1244 #endif 1245 panic("pmap_pvo_to_pte: pvo %p: has valid pte in " 1246 "pmap_pteg_table %p but invalid in pvo", 1247 pvo, pt); 1248 } 1249 if (((pt->pte_lo ^ pvo->pvo_pte.pte_lo) & ~(PTE_CHG|PTE_REF)) != 0) { 1250 #if defined(DEBUG) || defined(PMAPCHECK) 1251 pmap_pte_print(pt); 1252 #endif 1253 panic("pmap_pvo_to_pte: pvo %p: pvo pte does " 1254 "not match pte %p in pmap_pteg_table", 1255 pvo, pt); 1256 } 1257 return pt; 1258 } 1259 1260 if (pvo->pvo_pte.pte_hi & PTE_VALID) { 1261 #if defined(DEBUG) || defined(PMAPCHECK) 1262 pmap_pte_print(pt); 1263 #endif 1264 panic("pmap_pvo_to_pte: pvo %p: has invalid pte %p in " 1265 "pmap_pteg_table but valid in pvo", pvo, pt); 1266 } 1267 return NULL; 1268 #endif /* !(!DIAGNOSTIC && !DEBUG && !PMAPCHECK) */ 1269 } 1270 1271 struct pvo_entry * 1272 pmap_pvo_find_va(pmap_t pm, vaddr_t va, int *pteidx_p) 1273 { 1274 struct pvo_entry *pvo; 1275 int ptegidx; 1276 1277 va &= ~ADDR_POFF; 1278 ptegidx = va_to_pteg(pm, va); 1279 1280 TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { 1281 #if defined(DIAGNOSTIC) || defined(DEBUG) || 
defined(PMAPCHECK) 1282 if ((uintptr_t) pvo >= SEGMENT_LENGTH) 1283 panic("pmap_pvo_find_va: invalid pvo %p on " 1284 "list %#x (%p)", pvo, ptegidx, 1285 &pmap_pvo_table[ptegidx]); 1286 #endif 1287 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { 1288 if (pteidx_p) 1289 *pteidx_p = pmap_pvo_pte_index(pvo, ptegidx); 1290 return pvo; 1291 } 1292 } 1293 return NULL; 1294 } 1295 1296 #if defined(DEBUG) || defined(PMAPCHECK) 1297 void 1298 pmap_pvo_check(const struct pvo_entry *pvo) 1299 { 1300 struct pvo_head *pvo_head; 1301 struct pvo_entry *pvo0; 1302 volatile struct pte *pt; 1303 int failed = 0; 1304 1305 if ((uintptr_t)(pvo+1) >= SEGMENT_LENGTH) 1306 panic("pmap_pvo_check: pvo %p: invalid address", pvo); 1307 1308 if ((uintptr_t)(pvo->pvo_pmap+1) >= SEGMENT_LENGTH) { 1309 printf("pmap_pvo_check: pvo %p: invalid pmap address %p\n", 1310 pvo, pvo->pvo_pmap); 1311 failed = 1; 1312 } 1313 1314 if ((uintptr_t)TAILQ_NEXT(pvo, pvo_olink) >= SEGMENT_LENGTH || 1315 (((uintptr_t)TAILQ_NEXT(pvo, pvo_olink)) & 0x1f) != 0) { 1316 printf("pmap_pvo_check: pvo %p: invalid ovlink address %p\n", 1317 pvo, TAILQ_NEXT(pvo, pvo_olink)); 1318 failed = 1; 1319 } 1320 1321 if ((uintptr_t)LIST_NEXT(pvo, pvo_vlink) >= SEGMENT_LENGTH || 1322 (((uintptr_t)LIST_NEXT(pvo, pvo_vlink)) & 0x1f) != 0) { 1323 printf("pmap_pvo_check: pvo %p: invalid ovlink address %p\n", 1324 pvo, LIST_NEXT(pvo, pvo_vlink)); 1325 failed = 1; 1326 } 1327 1328 if (pvo->pvo_vaddr & PVO_MANAGED) { 1329 pvo_head = pa_to_pvoh(pvo->pvo_pte.pte_lo & PTE_RPGN, NULL); 1330 } else { 1331 if (pvo->pvo_vaddr < VM_MIN_KERNEL_ADDRESS) { 1332 printf("pmap_pvo_check: pvo %p: non kernel address " 1333 "on kernel unmanaged list\n", pvo); 1334 failed = 1; 1335 } 1336 pvo_head = &pmap_pvo_kunmanaged; 1337 } 1338 LIST_FOREACH(pvo0, pvo_head, pvo_vlink) { 1339 if (pvo0 == pvo) 1340 break; 1341 } 1342 if (pvo0 == NULL) { 1343 printf("pmap_pvo_check: pvo %p: not present " 1344 "on its vlist head %p\n", pvo, pvo_head); 1345 failed = 1; 1346 } 1347 if (pvo != pmap_pvo_find_va(pvo->pvo_pmap, pvo->pvo_vaddr, NULL)) { 1348 printf("pmap_pvo_check: pvo %p: not present " 1349 "on its olist head\n", pvo); 1350 failed = 1; 1351 } 1352 pt = pmap_pvo_to_pte(pvo, -1); 1353 if (pt == NULL) { 1354 if (pvo->pvo_pte.pte_hi & PTE_VALID) { 1355 printf("pmap_pvo_check: pvo %p: pte_hi VALID but " 1356 "no PTE\n", pvo); 1357 failed = 1; 1358 } 1359 } else { 1360 if ((uintptr_t) pt < (uintptr_t) &pmap_pteg_table[0] || 1361 (uintptr_t) pt >= 1362 (uintptr_t) &pmap_pteg_table[pmap_pteg_cnt]) { 1363 printf("pmap_pvo_check: pvo %p: pte %p not in " 1364 "pteg table\n", pvo, pt); 1365 failed = 1; 1366 } 1367 if (((((uintptr_t) pt) >> 3) & 7) != PVO_PTEGIDX_GET(pvo)) { 1368 printf("pmap_pvo_check: pvo %p: pte_hi VALID but " 1369 "no PTE\n", pvo); 1370 failed = 1; 1371 } 1372 if (pvo->pvo_pte.pte_hi != pt->pte_hi) { 1373 printf("pmap_pvo_check: pvo %p: pte_hi differ: " 1374 "%#lx/%#lx\n", pvo, pvo->pvo_pte.pte_hi, pt->pte_hi); 1375 failed = 1; 1376 } 1377 if (((pvo->pvo_pte.pte_lo ^ pt->pte_lo) & 1378 (PTE_PP|PTE_WIMG|PTE_RPGN)) != 0) { 1379 printf("pmap_pvo_check: pvo %p: pte_lo differ: " 1380 "%#lx/%#lx\n", pvo, 1381 pvo->pvo_pte.pte_lo & (PTE_PP|PTE_WIMG|PTE_RPGN), 1382 pt->pte_lo & (PTE_PP|PTE_WIMG|PTE_RPGN)); 1383 failed = 1; 1384 } 1385 if ((pmap_pte_to_va(pt) ^ PVO_VADDR(pvo)) & 0x0fffffff) { 1386 printf("pmap_pvo_check: pvo %p: PTE %p derived VA %#lx" 1387 " doesn't not match PVO's VA %#lx\n", 1388 pvo, pt, pmap_pte_to_va(pt), PVO_VADDR(pvo)); 1389 failed = 1; 1390 } 1391 if (failed) 
1392 pmap_pte_print(pt); 1393 } 1394 if (failed) 1395 panic("pmap_pvo_check: pvo %p, pm %p: bugcheck!", pvo, 1396 pvo->pvo_pmap); 1397 } 1398 #endif /* DEBUG || PMAPCHECK */ 1399 1400 /* 1401 * This returns whether this is the first mapping of a page. 1402 */ 1403 int 1404 pmap_pvo_enter(pmap_t pm, struct pool *pl, struct pvo_head *pvo_head, 1405 vaddr_t va, paddr_t pa, register_t pte_lo, int flags) 1406 { 1407 struct pvo_entry *pvo; 1408 struct pvo_tqhead *pvoh; 1409 register_t msr; 1410 int ptegidx; 1411 int i; 1412 int poolflags = PR_NOWAIT; 1413 1414 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 1415 if (pmap_pvo_remove_depth > 0) 1416 panic("pmap_pvo_enter: called while pmap_pvo_remove active!"); 1417 if (++pmap_pvo_enter_depth > 1) 1418 panic("pmap_pvo_enter: called recursively!"); 1419 #endif 1420 1421 /* 1422 * Compute the PTE Group index. 1423 */ 1424 va &= ~ADDR_POFF; 1425 ptegidx = va_to_pteg(pm, va); 1426 1427 msr = pmap_interrupts_off(); 1428 /* 1429 * Remove any existing mapping for this page. Reuse the 1430 * pvo entry if there a mapping. 1431 */ 1432 TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { 1433 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { 1434 #ifdef DEBUG 1435 if ((pmapdebug & PMAPDEBUG_PVOENTER) && 1436 ((pvo->pvo_pte.pte_lo ^ (pa|pte_lo)) & 1437 ~(PTE_REF|PTE_CHG)) == 0 && 1438 va < VM_MIN_KERNEL_ADDRESS) { 1439 printf("pmap_pvo_enter: pvo %p: dup %#lx/%#lx\n", 1440 pvo, pvo->pvo_pte.pte_lo, pte_lo|pa); 1441 printf("pmap_pvo_enter: pte_hi=%#lx sr=%#lx\n", 1442 pvo->pvo_pte.pte_hi, 1443 pm->pm_sr[va >> ADDR_SR_SHFT]); 1444 pmap_pte_print(pmap_pvo_to_pte(pvo, -1)); 1445 #ifdef DDBX 1446 Debugger(); 1447 #endif 1448 } 1449 #endif 1450 PMAPCOUNT(mappings_replaced); 1451 pmap_pvo_remove(pvo, -1); 1452 break; 1453 } 1454 } 1455 1456 /* 1457 * If we aren't overwriting an mapping, try to allocate 1458 */ 1459 pmap_interrupts_restore(msr); 1460 pvo = pool_get(pl, poolflags); 1461 msr = pmap_interrupts_off(); 1462 if (pvo == NULL) { 1463 #if 0 1464 pvo = pmap_pvo_reclaim(pm); 1465 if (pvo == NULL) { 1466 #endif 1467 if ((flags & PMAP_CANFAIL) == 0) 1468 panic("pmap_pvo_enter: failed"); 1469 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 1470 pmap_pvo_enter_depth--; 1471 #endif 1472 pmap_interrupts_restore(msr); 1473 return ENOMEM; 1474 #if 0 1475 } 1476 #endif 1477 } 1478 pvo->pvo_vaddr = va; 1479 pvo->pvo_pmap = pm; 1480 pvo->pvo_vaddr &= ~ADDR_POFF; 1481 if (flags & VM_PROT_EXECUTE) { 1482 PMAPCOUNT(exec_mappings); 1483 pvo->pvo_vaddr |= PVO_EXECUTABLE; 1484 } 1485 if (flags & PMAP_WIRED) 1486 pvo->pvo_vaddr |= PVO_WIRED; 1487 if (pvo_head != &pmap_pvo_kunmanaged) { 1488 pvo->pvo_vaddr |= PVO_MANAGED; 1489 PMAPCOUNT(mappings); 1490 } else { 1491 PMAPCOUNT(kernel_mappings); 1492 } 1493 pmap_pte_create(&pvo->pvo_pte, pm, va, pa | pte_lo); 1494 1495 LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink); 1496 if (pvo->pvo_pte.pte_lo & PVO_WIRED) 1497 pvo->pvo_pmap->pm_stats.wired_count++; 1498 pvo->pvo_pmap->pm_stats.resident_count++; 1499 #if defined(DEBUG) 1500 if (pm != pmap_kernel() && va < VM_MIN_KERNEL_ADDRESS) 1501 DPRINTFN(PVOENTER, 1502 ("pmap_pvo_enter: pvo %p: pm %p va %#lx pa %#lx\n", 1503 pvo, pm, va, pa)); 1504 #endif 1505 1506 /* 1507 * We hope this succeeds but it isn't required. 1508 */ 1509 pvoh = &pmap_pvo_table[ptegidx]; 1510 i = pmap_pte_insert(ptegidx, &pvo->pvo_pte); 1511 if (i >= 0) { 1512 PVO_PTEGIDX_SET(pvo, i); 1513 PMAPCOUNT2(((pvo->pvo_pte.pte_hi & PTE_HID) 1514 ? 
pmap_evcnt_ptes_secondary : pmap_evcnt_ptes_primary)[i]); 1515 TAILQ_INSERT_TAIL(pvoh, pvo, pvo_olink); 1516 } else { 1517 1518 /* 1519 * Since we didn't have room for this entry (which makes it 1520 * and evicted entry), place it at the head of the list. 1521 */ 1522 TAILQ_INSERT_HEAD(pvoh, pvo, pvo_olink); 1523 PMAPCOUNT(ptes_evicted); 1524 pm->pm_evictions++; 1525 } 1526 PMAP_PVO_CHECK(pvo); /* sanity check */ 1527 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 1528 pmap_pvo_enter_depth--; 1529 #endif 1530 pmap_interrupts_restore(msr); 1531 return 0; 1532 } 1533 1534 void 1535 pmap_pvo_remove(struct pvo_entry *pvo, int pteidx) 1536 { 1537 volatile struct pte *pt; 1538 int ptegidx; 1539 1540 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 1541 if (++pmap_pvo_remove_depth > 1) 1542 panic("pmap_pvo_remove: called recursively!"); 1543 #endif 1544 1545 /* 1546 * If we haven't been supplied the ptegidx, calculate it. 1547 */ 1548 if (pteidx == -1) { 1549 ptegidx = va_to_pteg(pvo->pvo_pmap, pvo->pvo_vaddr); 1550 pteidx = pmap_pvo_pte_index(pvo, ptegidx); 1551 } else { 1552 ptegidx = pteidx >> 3; 1553 if (pvo->pvo_pte.pte_hi & PTE_HID) 1554 ptegidx ^= pmap_pteg_mask; 1555 } 1556 PMAP_PVO_CHECK(pvo); /* sanity check */ 1557 1558 /* 1559 * If there is an active pte entry, we need to deactivate it 1560 * (and save the ref & chg bits). 1561 */ 1562 pt = pmap_pvo_to_pte(pvo, pteidx); 1563 if (pt != NULL) { 1564 pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 1565 PVO_PTEGIDX_CLR(pvo); 1566 PMAPCOUNT(ptes_removed); 1567 } else { 1568 KASSERT(pvo->pvo_pmap->pm_evictions > 0); 1569 pvo->pvo_pmap->pm_evictions--; 1570 } 1571 1572 /* 1573 * Update our statistics 1574 */ 1575 pvo->pvo_pmap->pm_stats.resident_count--; 1576 if (pvo->pvo_pte.pte_lo & PVO_WIRED) 1577 pvo->pvo_pmap->pm_stats.wired_count--; 1578 1579 /* 1580 * Save the REF/CHG bits into their cache if the page is managed. 1581 */ 1582 if (pvo->pvo_vaddr & PVO_MANAGED) { 1583 register_t ptelo = pvo->pvo_pte.pte_lo; 1584 struct vm_page *pg = PHYS_TO_VM_PAGE(ptelo & PTE_RPGN); 1585 1586 if (pg != NULL) { 1587 pmap_attr_save(pg, ptelo & (PTE_REF|PTE_CHG)); 1588 } 1589 PMAPCOUNT(unmappings); 1590 } else { 1591 PMAPCOUNT(kernel_unmappings); 1592 } 1593 1594 /* 1595 * Remove the PVO from its lists and return it to the pool. 1596 */ 1597 LIST_REMOVE(pvo, pvo_vlink); 1598 TAILQ_REMOVE(&pmap_pvo_table[ptegidx], pvo, pvo_olink); 1599 pool_put(pvo->pvo_vaddr & PVO_MANAGED 1600 ? &pmap_mpvo_pool : &pmap_upvo_pool, pvo); 1601 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 1602 pmap_pvo_remove_depth--; 1603 #endif 1604 } 1605 1606 /* 1607 * Insert physical page at pa into the given pmap at virtual address va. 
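 */

/*
 * Hypothetical caller of pmap_enter() below (sketch only; the wrapper
 * name is invented for illustration): establish a wired, writable
 * mapping in the kernel pmap that fails with ENOMEM instead of
 * panicking when no PVO can be allocated.
 */
#if 0
static int
pmap_enter_wired_rw(vaddr_t va, paddr_t pa)
{
	return pmap_enter(pmap_kernel(), va, pa,
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);
}
#endif

/*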
1608 */ 1609 int 1610 pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags) 1611 { 1612 struct mem_region *mp; 1613 struct pvo_head *pvo_head; 1614 struct vm_page *pg; 1615 struct pool *pl; 1616 register_t pte_lo; 1617 int s; 1618 int error; 1619 u_int pvo_flags; 1620 u_int was_exec = 0; 1621 1622 if (__predict_false(!pmap_initialized)) { 1623 pvo_head = &pmap_pvo_kunmanaged; 1624 pl = &pmap_upvo_pool; 1625 pvo_flags = 0; 1626 pg = NULL; 1627 was_exec = PTE_EXEC; 1628 } else { 1629 pvo_head = pa_to_pvoh(pa, &pg); 1630 pl = &pmap_mpvo_pool; 1631 pvo_flags = PVO_MANAGED; 1632 } 1633 1634 DPRINTFN(ENTER, 1635 ("pmap_enter(%p, 0x%lx, 0x%lx, 0x%x, 0x%x):", 1636 pm, va, pa, prot, flags)); 1637 1638 /* 1639 * If this is a managed page, and it's the first reference to the 1640 * page clear the execness of the page. Otherwise fetch the execness. 1641 */ 1642 if (pg != NULL) 1643 was_exec = pmap_attr_fetch(pg) & PTE_EXEC; 1644 1645 DPRINTFN(ENTER, (" was_exec=%d", was_exec)); 1646 1647 /* 1648 * Assume the page is cache inhibited and access is guarded unless 1649 * it's in our available memory array. If it is in the memory array, 1650 * asssume it's in memory coherent memory. 1651 */ 1652 pte_lo = PTE_IG; 1653 if ((flags & PMAP_NC) == 0) { 1654 for (mp = mem; mp->size; mp++) { 1655 if (pa >= mp->start && pa < mp->start + mp->size) { 1656 pte_lo = PTE_M; 1657 break; 1658 } 1659 } 1660 } 1661 1662 if (prot & VM_PROT_WRITE) 1663 pte_lo |= PTE_BW; 1664 else 1665 pte_lo |= PTE_BR; 1666 1667 /* 1668 * If this was in response to a fault, "pre-fault" the PTE's 1669 * changed/referenced bit appropriately. 1670 */ 1671 if (flags & VM_PROT_WRITE) 1672 pte_lo |= PTE_CHG; 1673 if (flags & (VM_PROT_READ|VM_PROT_WRITE)) 1674 pte_lo |= PTE_REF; 1675 1676 #if 0 1677 if (pm == pmap_kernel()) { 1678 if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) == VM_PROT_READ) 1679 printf("pmap_pvo_enter: Kernel RO va %#lx pa %#lx\n", 1680 va, pa); 1681 if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) == VM_PROT_NONE) 1682 printf("pmap_pvo_enter: Kernel N/A va %#lx pa %#lx\n", 1683 va, pa); 1684 } 1685 #endif 1686 1687 /* 1688 * We need to know if this page can be executable 1689 */ 1690 flags |= (prot & VM_PROT_EXECUTE); 1691 1692 /* 1693 * Record mapping for later back-translation and pte spilling. 1694 * This will overwrite any existing mapping. 1695 */ 1696 s = splvm(); 1697 error = pmap_pvo_enter(pm, pl, pvo_head, va, pa, pte_lo, flags); 1698 splx(s); 1699 1700 /* 1701 * Flush the real page from the instruction cache if this page is 1702 * mapped executable and cacheable and has not been flushed since 1703 * the last time it was modified. 
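	 * A successful flush is recorded below with pmap_attr_save(pg,
	 * PTE_EXEC), so later executable mappings of the same page (i.e.
	 * was_exec != 0) can skip the syncicache call until that attribute
	 * is cleared again.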
1704 */ 1705 if (error == 0 && 1706 (flags & VM_PROT_EXECUTE) && 1707 (pte_lo & PTE_I) == 0 && 1708 was_exec == 0) { 1709 DPRINTFN(ENTER, (" syncicache")); 1710 PMAPCOUNT(exec_synced); 1711 pmap_syncicache(pa, PAGE_SIZE); 1712 if (pg != NULL) { 1713 pmap_attr_save(pg, PTE_EXEC); 1714 PMAPCOUNT(exec_cached); 1715 #if defined(DEBUG) || defined(PMAPDEBUG) 1716 if (pmapdebug & PMAPDEBUG_ENTER) 1717 printf(" marked-as-exec"); 1718 else if (pmapdebug & PMAPDEBUG_EXEC) 1719 printf("[pmap_enter: %#lx: marked-as-exec]\n", 1720 pg->phys_addr); 1721 1722 #endif 1723 } 1724 } 1725 1726 DPRINTFN(ENTER, (": error=%d\n", error)); 1727 1728 return error; 1729 } 1730 1731 void 1732 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot) 1733 { 1734 struct mem_region *mp; 1735 register_t pte_lo; 1736 register_t msr; 1737 int error; 1738 int s; 1739 1740 if (va < VM_MIN_KERNEL_ADDRESS) 1741 panic("pmap_kenter_pa: attempt to enter " 1742 "non-kernel address %#lx!", va); 1743 1744 DPRINTFN(KENTER, 1745 ("pmap_kenter_pa(%#lx,%#lx,%#x)\n", va, pa, prot)); 1746 1747 /* 1748 * Assume the page is cache inhibited and access is guarded unless 1749 * it's in our available memory array. If it is in the memory array, 1750 * asssume it's in memory coherent memory. 1751 */ 1752 pte_lo = PTE_IG; 1753 if ((prot & PMAP_NC) == 0) { 1754 for (mp = mem; mp->size; mp++) { 1755 if (pa >= mp->start && pa < mp->start + mp->size) { 1756 pte_lo = PTE_M; 1757 break; 1758 } 1759 } 1760 } 1761 1762 if (prot & VM_PROT_WRITE) 1763 pte_lo |= PTE_BW; 1764 else 1765 pte_lo |= PTE_BR; 1766 1767 /* 1768 * We don't care about REF/CHG on PVOs on the unmanaged list. 1769 */ 1770 s = splvm(); 1771 msr = pmap_interrupts_off(); 1772 error = pmap_pvo_enter(pmap_kernel(), &pmap_upvo_pool, 1773 &pmap_pvo_kunmanaged, va, pa, pte_lo, prot|PMAP_WIRED); 1774 pmap_interrupts_restore(msr); 1775 splx(s); 1776 1777 if (error != 0) 1778 panic("pmap_kenter_pa: failed to enter va %#lx pa %#lx: %d", 1779 va, pa, error); 1780 } 1781 1782 void 1783 pmap_kremove(vaddr_t va, vsize_t len) 1784 { 1785 if (va < VM_MIN_KERNEL_ADDRESS) 1786 panic("pmap_kremove: attempt to remove " 1787 "non-kernel address %#lx!", va); 1788 1789 DPRINTFN(KREMOVE,("pmap_kremove(%#lx,%#lx)\n", va, len)); 1790 pmap_remove(pmap_kernel(), va, va + len); 1791 } 1792 1793 /* 1794 * Remove the given range of mapping entries. 1795 */ 1796 void 1797 pmap_remove(pmap_t pm, vaddr_t va, vaddr_t endva) 1798 { 1799 struct pvo_entry *pvo; 1800 register_t msr; 1801 int pteidx; 1802 int s; 1803 1804 for (; va < endva; va += PAGE_SIZE) { 1805 s = splvm(); 1806 msr = pmap_interrupts_off(); 1807 pvo = pmap_pvo_find_va(pm, va, &pteidx); 1808 if (pvo != NULL) { 1809 pmap_pvo_remove(pvo, pteidx); 1810 } 1811 pmap_interrupts_restore(msr); 1812 splx(s); 1813 } 1814 } 1815 1816 /* 1817 * Get the physical page address for the given pmap/virtual address. 1818 */ 1819 boolean_t 1820 pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap) 1821 { 1822 struct pvo_entry *pvo; 1823 register_t msr; 1824 int s; 1825 1826 /* 1827 * If this is a kernel pmap lookup, also check the battable 1828 * and if we get a hit, translate the VA to a PA using the 1829 * BAT entries. Don't check for VM_MAX_KENREL_ADDRESS is 1830 * that will wrap back to 0. 
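	 * In the BAT case below, the block-length field (BAT_BL) is widened
	 * into an address mask; the physical base comes from the masked
	 * batl bits and the low-order offset comes straight from va.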
1831 */ 1832 if (pm == pmap_kernel() && 1833 (va < VM_MIN_KERNEL_ADDRESS || 1834 (KERNEL2_SR < 15 && VM_MAX_KERNEL_ADDRESS <= va))) { 1835 register_t batu = battable[va >> ADDR_SR_SHFT].batu; 1836 KASSERT((va >> ADDR_SR_SHFT) != USER_SR); 1837 if (BAT_VALID_P(batu,0) && BAT_VA_MATCH_P(batu,va)) { 1838 register_t batl = battable[va >> ADDR_SR_SHFT].batl; 1839 register_t mask = (~(batu & BAT_BL) << 15) & ~0x1ffffL; 1840 *pap = (batl & mask) | (va & ~mask); 1841 return TRUE; 1842 } 1843 return FALSE; 1844 } 1845 1846 s = splvm(); 1847 msr = pmap_interrupts_off(); 1848 pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL); 1849 if (pvo != NULL) { 1850 PMAP_PVO_CHECK(pvo); /* sanity check */ 1851 *pap = (pvo->pvo_pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF); 1852 } 1853 pmap_interrupts_restore(msr); 1854 splx(s); 1855 return pvo != NULL; 1856 } 1857 1858 /* 1859 * Lower the protection on the specified range of this pmap. 1860 * 1861 * There are only two cases: either the protection is going to 0, 1862 * or it is going to read-only. 1863 */ 1864 void 1865 pmap_protect(pmap_t pm, vaddr_t va, vaddr_t endva, vm_prot_t prot) 1866 { 1867 struct pvo_entry *pvo; 1868 volatile struct pte *pt; 1869 register_t msr; 1870 int s; 1871 int pteidx; 1872 1873 /* 1874 * Since this routine only downgrades protection, we should 1875 * always be called without WRITE permisison. 1876 */ 1877 KASSERT((prot & VM_PROT_WRITE) == 0); 1878 1879 /* 1880 * If there is no protection, this is equivalent to 1881 * remove the pmap from the pmap. 1882 */ 1883 if ((prot & VM_PROT_READ) == 0) { 1884 pmap_remove(pm, va, endva); 1885 return; 1886 } 1887 1888 s = splvm(); 1889 msr = pmap_interrupts_off(); 1890 1891 for (; va < endva; va += PAGE_SIZE) { 1892 pvo = pmap_pvo_find_va(pm, va, &pteidx); 1893 if (pvo == NULL) 1894 continue; 1895 PMAP_PVO_CHECK(pvo); /* sanity check */ 1896 1897 /* 1898 * Revoke executable if asked to do so. 1899 */ 1900 if ((prot & VM_PROT_EXECUTE) == 0) 1901 pvo->pvo_vaddr &= ~PVO_EXECUTABLE; 1902 1903 #if 0 1904 /* 1905 * If the page is already read-only, no change 1906 * needs to be made. 1907 */ 1908 if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) 1909 continue; 1910 #endif 1911 /* 1912 * Grab the PTE pointer before we diddle with 1913 * the cached PTE copy. 1914 */ 1915 pt = pmap_pvo_to_pte(pvo, pteidx); 1916 /* 1917 * Change the protection of the page. 1918 */ 1919 pvo->pvo_pte.pte_lo &= ~PTE_PP; 1920 pvo->pvo_pte.pte_lo |= PTE_BR; 1921 1922 /* 1923 * If the PVO is in the page table, update 1924 * that pte at well. 1925 */ 1926 if (pt != NULL) { 1927 pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 1928 PMAPCOUNT(ptes_changed); 1929 } 1930 1931 PMAP_PVO_CHECK(pvo); /* sanity check */ 1932 } 1933 1934 pmap_interrupts_restore(msr); 1935 splx(s); 1936 } 1937 1938 void 1939 pmap_unwire(pmap_t pm, vaddr_t va) 1940 { 1941 struct pvo_entry *pvo; 1942 register_t msr; 1943 int s; 1944 1945 s = splvm(); 1946 msr = pmap_interrupts_off(); 1947 1948 pvo = pmap_pvo_find_va(pm, va, NULL); 1949 if (pvo != NULL) { 1950 if (pvo->pvo_vaddr & PVO_WIRED) { 1951 pvo->pvo_vaddr &= ~PVO_WIRED; 1952 pm->pm_stats.wired_count--; 1953 } 1954 PMAP_PVO_CHECK(pvo); /* sanity check */ 1955 } 1956 1957 pmap_interrupts_restore(msr); 1958 splx(s); 1959 } 1960 1961 /* 1962 * Lower the protection on the specified physical page. 1963 * 1964 * There are only two cases: either the protection is going to 0, 1965 * or it is going to read-only. 
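 */

/*
 * Usage sketch for pmap_protect() above (illustration only; the wrapper
 * name and range are hypothetical): downgrade a range to read-only and
 * follow up with pmap_update(), which issues a TLBSYNC in this pmap.
 */
#if 0
static void
pmap_make_readonly(pmap_t pm, vaddr_t va, vsize_t len)
{
	pmap_protect(pm, va, va + len, VM_PROT_READ);
	pmap_update(pm);
}
#endif

/*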
1966 */ 1967 void 1968 pmap_page_protect(struct vm_page *pg, vm_prot_t prot) 1969 { 1970 struct pvo_head *pvo_head; 1971 struct pvo_entry *pvo, *next_pvo; 1972 volatile struct pte *pt; 1973 register_t msr; 1974 int s; 1975 1976 /* 1977 * Since this routine only downgrades protection, if the 1978 * maximal protection is desired, there isn't any change 1979 * to be made. 1980 */ 1981 KASSERT((prot & VM_PROT_WRITE) == 0); 1982 if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) == (VM_PROT_READ|VM_PROT_WRITE)) 1983 return; 1984 1985 s = splvm(); 1986 msr = pmap_interrupts_off(); 1987 1988 /* 1989 * When UVM reuses a page, it does a pmap_page_protect with 1990 * VM_PROT_NONE. At that point, we can clear the exec flag 1991 * since we know the page will have different contents. 1992 */ 1993 if ((prot & VM_PROT_READ) == 0) { 1994 DPRINTFN(EXEC, ("[pmap_page_protect: %#lx: clear-exec]\n", 1995 pg->phys_addr)); 1996 if (pmap_attr_fetch(pg) & PTE_EXEC) { 1997 PMAPCOUNT(exec_uncached_page_protect); 1998 pmap_attr_clear(pg, PTE_EXEC); 1999 } 2000 } 2001 2002 pvo_head = vm_page_to_pvoh(pg); 2003 for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) { 2004 next_pvo = LIST_NEXT(pvo, pvo_vlink); 2005 PMAP_PVO_CHECK(pvo); /* sanity check */ 2006 2007 /* 2008 * Downgrading to no mapping at all, we just remove the entry. 2009 */ 2010 if ((prot & VM_PROT_READ) == 0) { 2011 pmap_pvo_remove(pvo, -1); 2012 continue; 2013 } 2014 2015 /* 2016 * If EXEC permission is being revoked, just clear the 2017 * flag in the PVO. 2018 */ 2019 if ((prot & VM_PROT_EXECUTE) == 0) 2020 pvo->pvo_vaddr &= ~PVO_EXECUTABLE; 2021 2022 /* 2023 * If this entry is already RO, don't diddle with the 2024 * page table. 2025 */ 2026 if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) { 2027 PMAP_PVO_CHECK(pvo); 2028 continue; 2029 } 2030 2031 /* 2032 * Grab the PTE before the we diddle the bits so 2033 * pvo_to_pte can verify the pte contents are as 2034 * expected. 2035 */ 2036 pt = pmap_pvo_to_pte(pvo, -1); 2037 pvo->pvo_pte.pte_lo &= ~PTE_PP; 2038 pvo->pvo_pte.pte_lo |= PTE_BR; 2039 if (pt != NULL) { 2040 pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 2041 PMAPCOUNT(ptes_changed); 2042 } 2043 PMAP_PVO_CHECK(pvo); /* sanity check */ 2044 } 2045 2046 pmap_interrupts_restore(msr); 2047 splx(s); 2048 } 2049 2050 /* 2051 * Activate the address space for the specified process. If the process 2052 * is the current process, load the new MMU context. 2053 */ 2054 void 2055 pmap_activate(struct lwp *l) 2056 { 2057 struct pcb *pcb = &l->l_addr->u_pcb; 2058 pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap; 2059 2060 DPRINTFN(ACTIVATE, 2061 ("pmap_activate: lwp %p (curlwp %p)\n", l, curlwp)); 2062 2063 /* 2064 * XXX Normally performed in cpu_fork(). 2065 */ 2066 if (pcb->pcb_pm != pmap) { 2067 pcb->pcb_pm = pmap; 2068 pcb->pcb_pmreal = pmap; 2069 } 2070 2071 /* 2072 * In theory, the SR registers need only be valid on return 2073 * to user space wait to do them there. 2074 */ 2075 if (l == curlwp) { 2076 /* Store pointer to new current pmap. */ 2077 curpm = pmap; 2078 } 2079 } 2080 2081 /* 2082 * Deactivate the specified process's address space. 
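 *
 * This is currently a no-op; presumably nothing needs to be torn down
 * here because the segment registers are only loaded on the way back to
 * user space, for whatever pmap is current at that point.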
2083 */ 2084 void 2085 pmap_deactivate(struct lwp *l) 2086 { 2087 } 2088 2089 boolean_t 2090 pmap_query_bit(struct vm_page *pg, int ptebit) 2091 { 2092 struct pvo_entry *pvo; 2093 volatile struct pte *pt; 2094 register_t msr; 2095 int s; 2096 2097 if (pmap_attr_fetch(pg) & ptebit) 2098 return TRUE; 2099 s = splvm(); 2100 msr = pmap_interrupts_off(); 2101 LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) { 2102 PMAP_PVO_CHECK(pvo); /* sanity check */ 2103 /* 2104 * See if we saved the bit off. If so cache, it and return 2105 * success. 2106 */ 2107 if (pvo->pvo_pte.pte_lo & ptebit) { 2108 pmap_attr_save(pg, ptebit); 2109 PMAP_PVO_CHECK(pvo); /* sanity check */ 2110 pmap_interrupts_restore(msr); 2111 splx(s); 2112 return TRUE; 2113 } 2114 } 2115 /* 2116 * No luck, now go thru the hard part of looking at the ptes 2117 * themselves. Sync so any pending REF/CHG bits are flushed 2118 * to the PTEs. 2119 */ 2120 SYNC(); 2121 LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) { 2122 PMAP_PVO_CHECK(pvo); /* sanity check */ 2123 /* 2124 * See if this pvo have a valid PTE. If so, fetch the 2125 * REF/CHG bits from the valid PTE. If the appropriate 2126 * ptebit is set, cache, it and return success. 2127 */ 2128 pt = pmap_pvo_to_pte(pvo, -1); 2129 if (pt != NULL) { 2130 pmap_pte_synch(pt, &pvo->pvo_pte); 2131 if (pvo->pvo_pte.pte_lo & ptebit) { 2132 pmap_attr_save(pg, ptebit); 2133 PMAP_PVO_CHECK(pvo); /* sanity check */ 2134 pmap_interrupts_restore(msr); 2135 splx(s); 2136 return TRUE; 2137 } 2138 } 2139 } 2140 pmap_interrupts_restore(msr); 2141 splx(s); 2142 return FALSE; 2143 } 2144 2145 boolean_t 2146 pmap_clear_bit(struct vm_page *pg, int ptebit) 2147 { 2148 struct pvo_head *pvoh = vm_page_to_pvoh(pg); 2149 struct pvo_entry *pvo; 2150 volatile struct pte *pt; 2151 register_t msr; 2152 int rv = 0; 2153 int s; 2154 2155 s = splvm(); 2156 msr = pmap_interrupts_off(); 2157 2158 /* 2159 * Fetch the cache value 2160 */ 2161 rv |= pmap_attr_fetch(pg); 2162 2163 /* 2164 * Clear the cached value. 2165 */ 2166 pmap_attr_clear(pg, ptebit); 2167 2168 /* 2169 * Sync so any pending REF/CHG bits are flushed to the PTEs (so we 2170 * can reset the right ones). Note that since the pvo entries and 2171 * list heads are accessed via BAT0 and are never placed in the 2172 * page table, we don't have to worry about further accesses setting 2173 * the REF/CHG bits. 2174 */ 2175 SYNC(); 2176 2177 /* 2178 * For each pvo entry, clear pvo's ptebit. If this pvo have a 2179 * valid PTE. If so, clear the ptebit from the valid PTE. 2180 */ 2181 LIST_FOREACH(pvo, pvoh, pvo_vlink) { 2182 PMAP_PVO_CHECK(pvo); /* sanity check */ 2183 pt = pmap_pvo_to_pte(pvo, -1); 2184 if (pt != NULL) { 2185 /* 2186 * Only sync the PTE if the bit we are looking 2187 * for is not already set. 2188 */ 2189 if ((pvo->pvo_pte.pte_lo & ptebit) == 0) 2190 pmap_pte_synch(pt, &pvo->pvo_pte); 2191 /* 2192 * If the bit we are looking for was already set, 2193 * clear that bit in the pte. 2194 */ 2195 if (pvo->pvo_pte.pte_lo & ptebit) 2196 pmap_pte_clear(pt, PVO_VADDR(pvo), ptebit); 2197 } 2198 rv |= pvo->pvo_pte.pte_lo & (PTE_CHG|PTE_REF); 2199 pvo->pvo_pte.pte_lo &= ~ptebit; 2200 PMAP_PVO_CHECK(pvo); /* sanity check */ 2201 } 2202 pmap_interrupts_restore(msr); 2203 splx(s); 2204 /* 2205 * If we are clearing the modify bit and this page was marked EXEC 2206 * and the user of the page thinks the page was modified, then we 2207 * need to clean it from the icache if it's mapped or clear the EXEC 2208 * bit if it's not mapped. 
The page itself might not have the CHG 2209 * bit set if the modification was done via DMA to the page. 2210 */ 2211 if ((ptebit & PTE_CHG) && (rv & PTE_EXEC)) { 2212 if (LIST_EMPTY(pvoh)) { 2213 DPRINTFN(EXEC, ("[pmap_clear_bit: %#lx: clear-exec]\n", 2214 pg->phys_addr)); 2215 pmap_attr_clear(pg, PTE_EXEC); 2216 PMAPCOUNT(exec_uncached_clear_modify); 2217 } else { 2218 DPRINTFN(EXEC, ("[pmap_clear_bit: %#lx: syncicache]\n", 2219 pg->phys_addr)); 2220 pmap_syncicache(pg->phys_addr, PAGE_SIZE); 2221 PMAPCOUNT(exec_synced_clear_modify); 2222 } 2223 } 2224 return (rv & ptebit) != 0; 2225 } 2226 2227 void 2228 pmap_procwr(struct proc *p, vaddr_t va, size_t len) 2229 { 2230 struct pvo_entry *pvo; 2231 size_t offset = va & ADDR_POFF; 2232 int s; 2233 2234 s = splvm(); 2235 while (len > 0) { 2236 size_t seglen = PAGE_SIZE - offset; 2237 if (seglen > len) 2238 seglen = len; 2239 pvo = pmap_pvo_find_va(p->p_vmspace->vm_map.pmap, va, NULL); 2240 if (pvo != NULL && PVO_ISEXECUTABLE(pvo)) { 2241 pmap_syncicache( 2242 (pvo->pvo_pte.pte_lo & PTE_RPGN) | offset, seglen); 2243 PMAP_PVO_CHECK(pvo); 2244 } 2245 va += seglen; 2246 len -= seglen; 2247 offset = 0; 2248 } 2249 splx(s); 2250 } 2251 2252 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB) 2253 void 2254 pmap_pte_print(volatile struct pte *pt) 2255 { 2256 printf("PTE %p: ", pt); 2257 /* High word: */ 2258 printf("0x%08lx: [", pt->pte_hi); 2259 printf("%c ", (pt->pte_hi & PTE_VALID) ? 'v' : 'i'); 2260 printf("%c ", (pt->pte_hi & PTE_HID) ? 'h' : '-'); 2261 printf("0x%06lx 0x%02lx", 2262 (pt->pte_hi &~ PTE_VALID)>>PTE_VSID_SHFT, 2263 pt->pte_hi & PTE_API); 2264 printf(" (va 0x%08lx)] ", pmap_pte_to_va(pt)); 2265 /* Low word: */ 2266 printf(" 0x%08lx: [", pt->pte_lo); 2267 printf("0x%05lx... ", pt->pte_lo >> 12); 2268 printf("%c ", (pt->pte_lo & PTE_REF) ? 'r' : 'u'); 2269 printf("%c ", (pt->pte_lo & PTE_CHG) ? 'c' : 'n'); 2270 printf("%c", (pt->pte_lo & PTE_W) ? 'w' : '.'); 2271 printf("%c", (pt->pte_lo & PTE_I) ? 'i' : '.'); 2272 printf("%c", (pt->pte_lo & PTE_M) ? 'm' : '.'); 2273 printf("%c ", (pt->pte_lo & PTE_G) ? 
	    'g' : '.');
	switch (pt->pte_lo & PTE_PP) {
	case PTE_BR: printf("br]\n"); break;
	case PTE_BW: printf("bw]\n"); break;
	case PTE_SO: printf("so]\n"); break;
	case PTE_SW: printf("sw]\n"); break;
	}
}
#endif

#if defined(DDB)
void
pmap_pteg_check(void)
{
	volatile struct pte *pt;
	int i;
	int ptegidx;
	u_int p_valid = 0;
	u_int s_valid = 0;
	u_int invalid = 0;

	for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) {
		for (pt = pmap_pteg_table[ptegidx].pt, i = 8; --i >= 0; pt++) {
			if (pt->pte_hi & PTE_VALID) {
				if (pt->pte_hi & PTE_HID)
					s_valid++;
				else
					p_valid++;
			} else
				invalid++;
		}
	}
	printf("pteg_check: v(p) %#x (%d), v(s) %#x (%d), i %#x (%d)\n",
		p_valid, p_valid, s_valid, s_valid,
		invalid, invalid);
}

void
pmap_print_mmuregs(void)
{
	int i;
	u_int cpuvers;
	vaddr_t addr;
	register_t soft_sr[16];
	struct bat soft_ibat[4];
	struct bat soft_dbat[4];
	register_t sdr1;

	cpuvers = MFPVR() >> 16;

	__asm __volatile ("mfsdr1 %0" : "=r"(sdr1));
	addr = 0;
	for (i=0; i<16; i++) {
		soft_sr[i] = MFSRIN(addr);
		addr += (1 << ADDR_SR_SHFT);
	}

	/* read iBAT (601: uBAT) registers */
	__asm __volatile ("mfibatu %0,0" : "=r"(soft_ibat[0].batu));
	__asm __volatile ("mfibatl %0,0" : "=r"(soft_ibat[0].batl));
	__asm __volatile ("mfibatu %0,1" : "=r"(soft_ibat[1].batu));
	__asm __volatile ("mfibatl %0,1" : "=r"(soft_ibat[1].batl));
	__asm __volatile ("mfibatu %0,2" : "=r"(soft_ibat[2].batu));
	__asm __volatile ("mfibatl %0,2" : "=r"(soft_ibat[2].batl));
	__asm __volatile ("mfibatu %0,3" : "=r"(soft_ibat[3].batu));
	__asm __volatile ("mfibatl %0,3" : "=r"(soft_ibat[3].batl));

	if (cpuvers != MPC601) {
		/* read dBAT registers */
		__asm __volatile ("mfdbatu %0,0" : "=r"(soft_dbat[0].batu));
		__asm __volatile ("mfdbatl %0,0" : "=r"(soft_dbat[0].batl));
		__asm __volatile ("mfdbatu %0,1" : "=r"(soft_dbat[1].batu));
		__asm __volatile ("mfdbatl %0,1" : "=r"(soft_dbat[1].batl));
		__asm __volatile ("mfdbatu %0,2" : "=r"(soft_dbat[2].batu));
		__asm __volatile ("mfdbatl %0,2" : "=r"(soft_dbat[2].batl));
		__asm __volatile ("mfdbatu %0,3" : "=r"(soft_dbat[3].batu));
		__asm __volatile ("mfdbatl %0,3" : "=r"(soft_dbat[3].batl));
	}

	printf("SDR1:\t%#lx\n", sdr1);
	printf("SR[]:\t");
	for (i=0; i<4; i++)
		printf("0x%08lx, ", soft_sr[i]);
	printf("\n\t");
	for ( ; i<8; i++)
		printf("0x%08lx, ", soft_sr[i]);
	printf("\n\t");
	for ( ; i<12; i++)
		printf("0x%08lx, ", soft_sr[i]);
	printf("\n\t");
	for ( ; i<16; i++)
		printf("0x%08lx, ", soft_sr[i]);
	printf("\n");

	printf("%cBAT[]:\t", cpuvers == MPC601 ?
'u' : 'i'); 2369 for (i=0; i<4; i++) { 2370 printf("0x%08lx 0x%08lx, ", 2371 soft_ibat[i].batu, soft_ibat[i].batl); 2372 if (i == 1) 2373 printf("\n\t"); 2374 } 2375 if (cpuvers != MPC601) { 2376 printf("\ndBAT[]:\t"); 2377 for (i=0; i<4; i++) { 2378 printf("0x%08lx 0x%08lx, ", 2379 soft_dbat[i].batu, soft_dbat[i].batl); 2380 if (i == 1) 2381 printf("\n\t"); 2382 } 2383 } 2384 printf("\n"); 2385 } 2386 2387 void 2388 pmap_print_pte(pmap_t pm, vaddr_t va) 2389 { 2390 struct pvo_entry *pvo; 2391 volatile struct pte *pt; 2392 int pteidx; 2393 2394 pvo = pmap_pvo_find_va(pm, va, &pteidx); 2395 if (pvo != NULL) { 2396 pt = pmap_pvo_to_pte(pvo, pteidx); 2397 if (pt != NULL) { 2398 printf("VA %#lx -> %p -> %s %#lx, %#lx\n", 2399 va, pt, 2400 pt->pte_hi & PTE_HID ? "(sec)" : "(pri)", 2401 pt->pte_hi, pt->pte_lo); 2402 } else { 2403 printf("No valid PTE found\n"); 2404 } 2405 } else { 2406 printf("Address not in pmap\n"); 2407 } 2408 } 2409 2410 void 2411 pmap_pteg_dist(void) 2412 { 2413 struct pvo_entry *pvo; 2414 int ptegidx; 2415 int depth; 2416 int max_depth = 0; 2417 unsigned int depths[64]; 2418 2419 memset(depths, 0, sizeof(depths)); 2420 for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) { 2421 depth = 0; 2422 TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { 2423 depth++; 2424 } 2425 if (depth > max_depth) 2426 max_depth = depth; 2427 if (depth > 63) 2428 depth = 63; 2429 depths[depth]++; 2430 } 2431 2432 for (depth = 0; depth < 64; depth++) { 2433 printf(" [%2d]: %8u", depth, depths[depth]); 2434 if ((depth & 3) == 3) 2435 printf("\n"); 2436 if (depth == max_depth) 2437 break; 2438 } 2439 if ((depth & 3) != 3) 2440 printf("\n"); 2441 printf("Max depth found was %d\n", max_depth); 2442 } 2443 #endif /* DEBUG */ 2444 2445 #if defined(PMAPCHECK) || defined(DEBUG) 2446 void 2447 pmap_pvo_verify(void) 2448 { 2449 int ptegidx; 2450 int s; 2451 2452 s = splvm(); 2453 for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) { 2454 struct pvo_entry *pvo; 2455 TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { 2456 if ((uintptr_t) pvo >= SEGMENT_LENGTH) 2457 panic("pmap_pvo_verify: invalid pvo %p " 2458 "on list %#x", pvo, ptegidx); 2459 pmap_pvo_check(pvo); 2460 } 2461 } 2462 splx(s); 2463 } 2464 #endif /* PMAPCHECK */ 2465 2466 2467 void * 2468 pmap_pool_ualloc(struct pool *pp, int flags) 2469 { 2470 struct pvo_page *pvop; 2471 2472 pvop = SIMPLEQ_FIRST(&pmap_upvop_head); 2473 if (pvop != NULL) { 2474 pmap_upvop_free--; 2475 SIMPLEQ_REMOVE_HEAD(&pmap_upvop_head, pvop_link); 2476 return pvop; 2477 } 2478 if (uvm.page_init_done != TRUE) { 2479 return (void *) uvm_pageboot_alloc(PAGE_SIZE); 2480 } 2481 return pmap_pool_malloc(pp, flags); 2482 } 2483 2484 void * 2485 pmap_pool_malloc(struct pool *pp, int flags) 2486 { 2487 struct pvo_page *pvop; 2488 struct vm_page *pg; 2489 2490 pvop = SIMPLEQ_FIRST(&pmap_mpvop_head); 2491 if (pvop != NULL) { 2492 pmap_mpvop_free--; 2493 SIMPLEQ_REMOVE_HEAD(&pmap_mpvop_head, pvop_link); 2494 return pvop; 2495 } 2496 again: 2497 pg = uvm_pagealloc_strat(NULL, 0, NULL, UVM_PGA_USERESERVE, 2498 UVM_PGA_STRAT_ONLY, VM_FREELIST_FIRST256); 2499 if (__predict_false(pg == NULL)) { 2500 if (flags & PR_WAITOK) { 2501 uvm_wait("plpg"); 2502 goto again; 2503 } else { 2504 return (0); 2505 } 2506 } 2507 return (void *) VM_PAGE_TO_PHYS(pg); 2508 } 2509 2510 void 2511 pmap_pool_ufree(struct pool *pp, void *va) 2512 { 2513 struct pvo_page *pvop; 2514 #if 0 2515 if (PHYS_TO_VM_PAGE((paddr_t) va) != NULL) { 2516 pmap_pool_mfree(va, size, tag); 2517 return; 2518 } 2519 
#endif
	pvop = va;
	SIMPLEQ_INSERT_HEAD(&pmap_upvop_head, pvop, pvop_link);
	pmap_upvop_free++;
	if (pmap_upvop_free > pmap_upvop_maxfree)
		pmap_upvop_maxfree = pmap_upvop_free;
}

void
pmap_pool_mfree(struct pool *pp, void *va)
{
	struct pvo_page *pvop;

	pvop = va;
	SIMPLEQ_INSERT_HEAD(&pmap_mpvop_head, pvop, pvop_link);
	pmap_mpvop_free++;
	if (pmap_mpvop_free > pmap_mpvop_maxfree)
		pmap_mpvop_maxfree = pmap_mpvop_free;
#if 0
	uvm_pagefree(PHYS_TO_VM_PAGE((paddr_t) va));
#endif
}

/*
 * This routine is used during bootstrap to steal to-be-managed memory
 * (which will then be unmanaged).  We use it to grab from the first 256MB
 * for our pmap needs and above 256MB for other stuff.
 */
vaddr_t
pmap_steal_memory(vsize_t vsize, vaddr_t *vstartp, vaddr_t *vendp)
{
	vsize_t size;
	vaddr_t va;
	paddr_t pa = 0;
	int npgs, bank;
	struct vm_physseg *ps;

	if (uvm.page_init_done == TRUE)
		panic("pmap_steal_memory: called _after_ bootstrap");

	*vstartp = VM_MIN_KERNEL_ADDRESS;
	*vendp = VM_MAX_KERNEL_ADDRESS;

	size = round_page(vsize);
	npgs = atop(size);

	/*
	 * PA 0 will never be among those given to UVM so we can use it
	 * to indicate we couldn't steal any memory.
	 */
	for (ps = vm_physmem, bank = 0; bank < vm_nphysseg; bank++, ps++) {
		if (ps->free_list == VM_FREELIST_FIRST256 &&
		    ps->avail_end - ps->avail_start >= npgs) {
			pa = ptoa(ps->avail_start);
			break;
		}
	}

	if (pa == 0)
		panic("pmap_steal_memory: no appropriate memory to steal!");

	ps->avail_start += npgs;
	ps->start += npgs;

	/*
	 * If we've used up all the pages in the segment, remove it and
	 * compact the list.
	 */
	if (ps->avail_start == ps->end) {
		/*
		 * If this was the last one, then a very bad thing has occurred
		 */
		if (--vm_nphysseg == 0)
			panic("pmap_steal_memory: out of memory!");

		printf("pmap_steal_memory: consumed bank %d\n", bank);
		for (; bank < vm_nphysseg; bank++, ps++) {
			ps[0] = ps[1];
		}
	}

	va = (vaddr_t) pa;
	memset((caddr_t) va, 0, size);
	pmap_pages_stolen += npgs;
#ifdef DEBUG
	if (pmapdebug && npgs > 1) {
		u_int cnt = 0;
		for (bank = 0, ps = vm_physmem; bank < vm_nphysseg; bank++, ps++)
			cnt += ps->avail_end - ps->avail_start;
		printf("pmap_steal_memory: stole %u (total %u) pages (%u left)\n",
		    npgs, pmap_pages_stolen, cnt);
	}
#endif

	return va;
}

/*
 * Find a chunk of memory with the right size and alignment.
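 *
 * In outline: an at_end request shaves the block off the tail of the
 * last suitable avail[] region; otherwise the first region that can
 * hold a suitably aligned block is trimmed at its start, trimmed at
 * its end, or split in two around the allocation.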
 */
void *
pmap_boot_find_memory(psize_t size, psize_t alignment, int at_end)
{
	struct mem_region *mp;
	paddr_t s, e;
	int i, j;

	size = round_page(size);

	DPRINTFN(BOOT,
	    ("pmap_boot_find_memory: size=%lx, alignment=%lx, at_end=%d",
	    size, alignment, at_end));

	if (alignment < PAGE_SIZE || (alignment & (alignment-1)) != 0)
		panic("pmap_boot_find_memory: invalid alignment %lx",
		    alignment);

	if (at_end) {
		if (alignment != PAGE_SIZE)
			panic("pmap_boot_find_memory: invalid ending "
			    "alignment %lx", alignment);

		for (mp = &avail[avail_cnt-1]; mp >= avail; mp--) {
			s = mp->start + mp->size - size;
			if (s >= mp->start && mp->size >= size) {
				DPRINTFN(BOOT,(": %lx\n", s));
				DPRINTFN(BOOT,
				    ("pmap_boot_find_memory: b-avail[%d] start "
				     "0x%lx size 0x%lx\n", mp - avail,
				     mp->start, mp->size));
				mp->size -= size;
				DPRINTFN(BOOT,
				    ("pmap_boot_find_memory: a-avail[%d] start "
				     "0x%lx size 0x%lx\n", mp - avail,
				     mp->start, mp->size));
				return (void *) s;
			}
		}
		panic("pmap_boot_find_memory: no available memory");
	}

	for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
		s = (mp->start + alignment - 1) & ~(alignment-1);
		e = s + size;

		/*
		 * Does the aligned block fit entirely within this region?
		 */
		if (s < mp->start || e > mp->start + mp->size)
			continue;

		DPRINTFN(BOOT,(": %lx\n", s));
		if (s == mp->start) {
			/*
			 * If the block starts at the beginning of the region,
			 * adjust the size & start.  (the region may now be
			 * zero in length)
			 */
			DPRINTFN(BOOT,
			    ("pmap_boot_find_memory: b-avail[%d] start "
			     "0x%lx size 0x%lx\n", i, mp->start, mp->size));
			mp->start += size;
			mp->size -= size;
			DPRINTFN(BOOT,
			    ("pmap_boot_find_memory: a-avail[%d] start "
			     "0x%lx size 0x%lx\n", i, mp->start, mp->size));
		} else if (e == mp->start + mp->size) {
			/*
			 * If the block ends at the end of the region,
			 * adjust only the size.
			 */
			DPRINTFN(BOOT,
			    ("pmap_boot_find_memory: b-avail[%d] start "
			     "0x%lx size 0x%lx\n", i, mp->start, mp->size));
			mp->size -= size;
			DPRINTFN(BOOT,
			    ("pmap_boot_find_memory: a-avail[%d] start "
			     "0x%lx size 0x%lx\n", i, mp->start, mp->size));
		} else {
			/*
			 * Block is in the middle of the region, so we
			 * have to split it in two.
			 */
			for (j = avail_cnt; j > i + 1; j--) {
				avail[j] = avail[j-1];
			}
			DPRINTFN(BOOT,
			    ("pmap_boot_find_memory: b-avail[%d] start "
			     "0x%lx size 0x%lx\n", i, mp->start, mp->size));
			mp[1].start = e;
			mp[1].size = mp[0].start + mp[0].size - e;
			mp[0].size = s - mp[0].start;
			avail_cnt++;
			for (; i < avail_cnt; i++) {
				DPRINTFN(BOOT,
				    ("pmap_boot_find_memory: a-avail[%d] "
				     "start 0x%lx size 0x%lx\n", i,
				     avail[i].start, avail[i].size));
			}
		}
		return (void *) s;
	}
	panic("pmap_boot_find_memory: not enough memory for "
	    "%lx/%lx allocation?", size, alignment);
}

/*
 * This is not part of the defined PMAP interface and is specific to the
 * PowerPC architecture.  This is called during initppc, before the system
 * is really initialized.
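 *
 * Roughly, the steps below are: fetch the mem/avail regions from the
 * board code, page-align them and carve out the kernel image and any
 * pmap_memlimit, allocate the PTEG hash table and PVO overflow lists
 * from the low 256MB, hand the remaining pages to UVM, set up the
 * kernel segment registers and SDR1, and finally create the PVO pools.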
 */
void
pmap_bootstrap(paddr_t kernelstart, paddr_t kernelend)
{
	struct mem_region *mp, tmp;
	paddr_t s, e;
	psize_t size;
	int i, j;

	/*
	 * Get memory.
	 */
	mem_regions(&mem, &avail);
#if defined(DEBUG)
	if (pmapdebug & PMAPDEBUG_BOOT) {
		printf("pmap_bootstrap: memory configuration:\n");
		for (mp = mem; mp->size; mp++) {
			printf("pmap_bootstrap: mem start 0x%lx size 0x%lx\n",
			    mp->start, mp->size);
		}
		for (mp = avail; mp->size; mp++) {
			printf("pmap_bootstrap: avail start 0x%lx size 0x%lx\n",
			    mp->start, mp->size);
		}
	}
#endif

	/*
	 * Find out how much physical memory we have and in how many chunks.
	 */
	for (mem_cnt = 0, mp = mem; mp->size; mp++) {
		if (mp->start >= pmap_memlimit)
			continue;
		if (mp->start + mp->size > pmap_memlimit) {
			size = pmap_memlimit - mp->start;
			physmem += btoc(size);
		} else {
			physmem += btoc(mp->size);
		}
		mem_cnt++;
	}

	/*
	 * Count the number of available entries.
	 */
	for (avail_cnt = 0, mp = avail; mp->size; mp++)
		avail_cnt++;

	/*
	 * Page align all regions.
	 */
	kernelstart = trunc_page(kernelstart);
	kernelend = round_page(kernelend);
	for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
		s = round_page(mp->start);
		mp->size -= (s - mp->start);
		mp->size = trunc_page(mp->size);
		mp->start = s;
		e = mp->start + mp->size;

		DPRINTFN(BOOT,
		    ("pmap_bootstrap: b-avail[%d] start 0x%lx size 0x%lx\n",
		    i, mp->start, mp->size));

		/*
		 * Don't allow the end to run beyond our artificial limit.
		 */
		if (e > pmap_memlimit)
			e = pmap_memlimit;

		/*
		 * If this region is empty or strange, skip it.
		 */
		if (e <= s) {
			mp->start = 0;
			mp->size = 0;
			continue;
		}

		/*
		 * Does this overlap the beginning of the kernel?
		 * Does it extend past the end of the kernel?
		 */
		else if (s < kernelstart && e > kernelstart) {
			if (e > kernelend) {
				avail[avail_cnt].start = kernelend;
				avail[avail_cnt].size = e - kernelend;
				avail_cnt++;
			}
			mp->size = kernelstart - s;
		}
		/*
		 * Check whether this region overlaps the end of the kernel.
		 */
		else if (s < kernelend && e > kernelend) {
			mp->start = kernelend;
			mp->size = e - kernelend;
		}
		/*
		 * Check whether this region lies completely inside the
		 * kernel.  Nuke it if it does.
		 */
		else if (s >= kernelstart && e <= kernelend) {
			mp->start = 0;
			mp->size = 0;
		}
		/*
		 * If the user imposed a memory limit, enforce it.
		 */
		else if (s >= pmap_memlimit) {
			mp->start = -PAGE_SIZE;		/* let us know why */
			mp->size = 0;
		}
		else {
			mp->start = s;
			mp->size = e - s;
		}
		DPRINTFN(BOOT,
		    ("pmap_bootstrap: a-avail[%d] start 0x%lx size 0x%lx\n",
		    i, mp->start, mp->size));
	}

	/*
	 * Move (and uncount) all the null regions to the end.
	 */
	for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
		if (mp->size == 0) {
			tmp = avail[i];
			avail[i] = avail[--avail_cnt];
			avail[avail_cnt] = avail[i];
		}
	}

	/*
	 * (Bubble)sort them into ascending order.
2864 */ 2865 for (i = 0; i < avail_cnt; i++) { 2866 for (j = i + 1; j < avail_cnt; j++) { 2867 if (avail[i].start > avail[j].start) { 2868 tmp = avail[i]; 2869 avail[i] = avail[j]; 2870 avail[j] = tmp; 2871 } 2872 } 2873 } 2874 2875 /* 2876 * Make sure they don't overlap. 2877 */ 2878 for (mp = avail, i = 0; i < avail_cnt - 1; i++, mp++) { 2879 if (mp[0].start + mp[0].size > mp[1].start) { 2880 mp[0].size = mp[1].start - mp[0].start; 2881 } 2882 DPRINTFN(BOOT, 2883 ("pmap_bootstrap: avail[%d] start 0x%lx size 0x%lx\n", 2884 i, mp->start, mp->size)); 2885 } 2886 DPRINTFN(BOOT, 2887 ("pmap_bootstrap: avail[%d] start 0x%lx size 0x%lx\n", 2888 i, mp->start, mp->size)); 2889 2890 #ifdef PTEGCOUNT 2891 pmap_pteg_cnt = PTEGCOUNT; 2892 #else /* PTEGCOUNT */ 2893 pmap_pteg_cnt = 0x1000; 2894 2895 while (pmap_pteg_cnt < physmem) 2896 pmap_pteg_cnt <<= 1; 2897 2898 pmap_pteg_cnt >>= 1; 2899 #endif /* PTEGCOUNT */ 2900 2901 /* 2902 * Find suitably aligned memory for PTEG hash table. 2903 */ 2904 size = pmap_pteg_cnt * sizeof(struct pteg); 2905 pmap_pteg_table = pmap_boot_find_memory(size, size, 0); 2906 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 2907 if ( (uintptr_t) pmap_pteg_table + size > SEGMENT_LENGTH) 2908 panic("pmap_bootstrap: pmap_pteg_table end (%p + %lx) > 256MB", 2909 pmap_pteg_table, size); 2910 #endif 2911 2912 memset((void *)pmap_pteg_table, 0, pmap_pteg_cnt * sizeof(struct pteg)); 2913 pmap_pteg_mask = pmap_pteg_cnt - 1; 2914 2915 /* 2916 * We cannot do pmap_steal_memory here since UVM hasn't been loaded 2917 * with pages. So we just steal them before giving them to UVM. 2918 */ 2919 size = sizeof(pmap_pvo_table[0]) * pmap_pteg_cnt; 2920 pmap_pvo_table = pmap_boot_find_memory(size, PAGE_SIZE, 0); 2921 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 2922 if ( (uintptr_t) pmap_pvo_table + size > SEGMENT_LENGTH) 2923 panic("pmap_bootstrap: pmap_pvo_table end (%p + %lx) > 256MB", 2924 pmap_pvo_table, size); 2925 #endif 2926 2927 for (i = 0; i < pmap_pteg_cnt; i++) 2928 TAILQ_INIT(&pmap_pvo_table[i]); 2929 2930 #ifndef MSGBUFADDR 2931 /* 2932 * Allocate msgbuf in high memory. 
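 *
 * pmap_boot_find_memory() is called with at_end set here, so the buffer
 * is carved from the tail of the last avail region rather than from the
 * front of the free list used by the other boot allocations.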
2933 */ 2934 msgbuf_paddr = 2935 (paddr_t) pmap_boot_find_memory(MSGBUFSIZE, PAGE_SIZE, 1); 2936 #endif 2937 2938 #ifdef __HAVE_PMAP_PHYSSEG 2939 { 2940 u_int npgs = 0; 2941 for (i = 0, mp = avail; i < avail_cnt; i++, mp++) 2942 npgs += btoc(mp->size); 2943 size = (sizeof(struct pvo_head) + 1) * npgs; 2944 pmap_physseg.pvoh = pmap_boot_find_memory(size, PAGE_SIZE, 0); 2945 pmap_physseg.attrs = (char *) &pmap_physseg.pvoh[npgs]; 2946 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 2947 if ((uintptr_t)pmap_physseg.pvoh + size > SEGMENT_LENGTH) 2948 panic("pmap_bootstrap: PVO list end (%p + %lx) > 256MB", 2949 pmap_physseg.pvoh, size); 2950 #endif 2951 } 2952 #endif 2953 2954 for (mp = avail, i = 0; i < avail_cnt; mp++, i++) { 2955 paddr_t pfstart = atop(mp->start); 2956 paddr_t pfend = atop(mp->start + mp->size); 2957 if (mp->size == 0) 2958 continue; 2959 if (mp->start + mp->size <= SEGMENT_LENGTH) { 2960 uvm_page_physload(pfstart, pfend, pfstart, pfend, 2961 VM_FREELIST_FIRST256); 2962 } else if (mp->start >= SEGMENT_LENGTH) { 2963 uvm_page_physload(pfstart, pfend, pfstart, pfend, 2964 VM_FREELIST_DEFAULT); 2965 } else { 2966 pfend = atop(SEGMENT_LENGTH); 2967 uvm_page_physload(pfstart, pfend, pfstart, pfend, 2968 VM_FREELIST_FIRST256); 2969 pfstart = atop(SEGMENT_LENGTH); 2970 pfend = atop(mp->start + mp->size); 2971 uvm_page_physload(pfstart, pfend, pfstart, pfend, 2972 VM_FREELIST_DEFAULT); 2973 } 2974 } 2975 2976 /* 2977 * Make sure kernel vsid is allocated as well as VSID 0. 2978 */ 2979 pmap_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS-1)) / VSID_NBPW] 2980 |= 1 << (KERNEL_VSIDBITS % VSID_NBPW); 2981 pmap_vsid_bitmap[0] |= 1; 2982 2983 /* 2984 * Initialize kernel pmap and hardware. 2985 */ 2986 for (i = 0; i < 16; i++) { 2987 pmap_kernel()->pm_sr[i] = EMPTY_SEGMENT; 2988 __asm __volatile ("mtsrin %0,%1" 2989 :: "r"(EMPTY_SEGMENT), "r"(i << ADDR_SR_SHFT)); 2990 } 2991 2992 pmap_kernel()->pm_sr[KERNEL_SR] = KERNEL_SEGMENT|SR_SUKEY|SR_PRKEY; 2993 __asm __volatile ("mtsr %0,%1" 2994 :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT)); 2995 #ifdef KERNEL2_SR 2996 pmap_kernel()->pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT|SR_SUKEY|SR_PRKEY; 2997 __asm __volatile ("mtsr %0,%1" 2998 :: "n"(KERNEL2_SR), "r"(KERNEL2_SEGMENT)); 2999 #endif 3000 for (i = 0; i < 16; i++) { 3001 if (iosrtable[i] & SR601_T) { 3002 pmap_kernel()->pm_sr[i] = iosrtable[i]; 3003 __asm __volatile ("mtsrin %0,%1" 3004 :: "r"(iosrtable[i]), "r"(i << ADDR_SR_SHFT)); 3005 } 3006 } 3007 3008 __asm __volatile ("sync; mtsdr1 %0; isync" 3009 :: "r"((uintptr_t)pmap_pteg_table | (pmap_pteg_mask >> 10))); 3010 tlbia(); 3011 3012 #ifdef ALTIVEC 3013 pmap_use_altivec = cpu_altivec; 3014 #endif 3015 3016 #ifdef DEBUG 3017 if (pmapdebug & PMAPDEBUG_BOOT) { 3018 u_int cnt; 3019 int bank; 3020 char pbuf[9]; 3021 for (cnt = 0, bank = 0; bank < vm_nphysseg; bank++) { 3022 cnt += vm_physmem[bank].avail_end - vm_physmem[bank].avail_start; 3023 printf("pmap_bootstrap: vm_physmem[%d]=%#lx-%#lx/%#lx\n", 3024 bank, 3025 ptoa(vm_physmem[bank].avail_start), 3026 ptoa(vm_physmem[bank].avail_end), 3027 ptoa(vm_physmem[bank].avail_end - vm_physmem[bank].avail_start)); 3028 } 3029 format_bytes(pbuf, sizeof(pbuf), ptoa((u_int64_t) cnt)); 3030 printf("pmap_bootstrap: UVM memory = %s (%u pages)\n", 3031 pbuf, cnt); 3032 } 3033 #endif 3034 3035 pool_init(&pmap_upvo_pool, sizeof(struct pvo_entry), 3036 sizeof(struct pvo_entry), 0, 0, "pmap_upvopl", 3037 &pmap_pool_uallocator); 3038 3039 pool_setlowat(&pmap_upvo_pool, 252); 3040 3041 pool_init(&pmap_pool, 
sizeof(struct pmap), 3042 sizeof(void *), 0, 0, "pmap_pl", &pmap_pool_uallocator); 3043 } 3044