1 /* $NetBSD: pmap.c,v 1.91 2014/03/03 15:36:36 macallan Exp $ */ 2 /*- 3 * Copyright (c) 2001 The NetBSD Foundation, Inc. 4 * All rights reserved. 5 * 6 * This code is derived from software contributed to The NetBSD Foundation 7 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc. 8 * 9 * Support for PPC64 Bridge mode added by Sanjay Lal <sanjayl@kymasys.com> 10 * of Kyma Systems LLC. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 23 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 24 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 * POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 /* 35 * Copyright (C) 1995, 1996 Wolfgang Solfrank. 36 * Copyright (C) 1995, 1996 TooLs GmbH. 37 * All rights reserved. 38 * 39 * Redistribution and use in source and binary forms, with or without 40 * modification, are permitted provided that the following conditions 41 * are met: 42 * 1. Redistributions of source code must retain the above copyright 43 * notice, this list of conditions and the following disclaimer. 44 * 2. Redistributions in binary form must reproduce the above copyright 45 * notice, this list of conditions and the following disclaimer in the 46 * documentation and/or other materials provided with the distribution. 47 * 3. All advertising materials mentioning features or use of this software 48 * must display the following acknowledgement: 49 * This product includes software developed by TooLs GmbH. 50 * 4. The name of TooLs GmbH may not be used to endorse or promote products 51 * derived from this software without specific prior written permission. 52 * 53 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR 54 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 55 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 56 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 58 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 59 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 60 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 61 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 62 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
63 */ 64 65 #include <sys/cdefs.h> 66 __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.91 2014/03/03 15:36:36 macallan Exp $"); 67 68 #define PMAP_NOOPNAMES 69 70 #include "opt_ppcarch.h" 71 #include "opt_altivec.h" 72 #include "opt_multiprocessor.h" 73 #include "opt_pmap.h" 74 75 #include <sys/param.h> 76 #include <sys/proc.h> 77 #include <sys/pool.h> 78 #include <sys/queue.h> 79 #include <sys/device.h> /* for evcnt */ 80 #include <sys/systm.h> 81 #include <sys/atomic.h> 82 83 #include <uvm/uvm.h> 84 85 #include <machine/powerpc.h> 86 #include <powerpc/bat.h> 87 #include <powerpc/pcb.h> 88 #include <powerpc/psl.h> 89 #include <powerpc/spr.h> 90 #include <powerpc/oea/spr.h> 91 #include <powerpc/oea/sr_601.h> 92 93 #ifdef ALTIVEC 94 extern int pmap_use_altivec; 95 #endif 96 97 #ifdef PMAP_MEMLIMIT 98 static paddr_t pmap_memlimit = PMAP_MEMLIMIT; 99 #else 100 static paddr_t pmap_memlimit = -PAGE_SIZE; /* there is no limit */ 101 #endif 102 103 extern struct pmap kernel_pmap_; 104 static unsigned int pmap_pages_stolen; 105 static u_long pmap_pte_valid; 106 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 107 static u_long pmap_pvo_enter_depth; 108 static u_long pmap_pvo_remove_depth; 109 #endif 110 111 #ifndef MSGBUFADDR 112 extern paddr_t msgbuf_paddr; 113 #endif 114 115 static struct mem_region *mem, *avail; 116 static u_int mem_cnt, avail_cnt; 117 118 #if !defined(PMAP_OEA64) && !defined(PMAP_OEA64_BRIDGE) 119 # define PMAP_OEA 1 120 #endif 121 122 #if defined(PMAP_OEA) 123 #define _PRIxpte "lx" 124 #else 125 #define _PRIxpte PRIx64 126 #endif 127 #define _PRIxpa "lx" 128 #define _PRIxva "lx" 129 #define _PRIsr "lx" 130 131 #ifdef PMAP_NEEDS_FIXUP 132 #if defined(PMAP_OEA) 133 #define PMAPNAME(name) pmap32_##name 134 #elif defined(PMAP_OEA64) 135 #define PMAPNAME(name) pmap64_##name 136 #elif defined(PMAP_OEA64_BRIDGE) 137 #define PMAPNAME(name) pmap64bridge_##name 138 #else 139 #error unknown variant for pmap 140 #endif 141 #endif /* PMAP_NEEDS_FIXUP */ 142 143 #ifdef PMAPNAME 144 #define STATIC static 145 #define pmap_pte_spill PMAPNAME(pte_spill) 146 #define pmap_real_memory PMAPNAME(real_memory) 147 #define pmap_init PMAPNAME(init) 148 #define pmap_virtual_space PMAPNAME(virtual_space) 149 #define pmap_create PMAPNAME(create) 150 #define pmap_reference PMAPNAME(reference) 151 #define pmap_destroy PMAPNAME(destroy) 152 #define pmap_copy PMAPNAME(copy) 153 #define pmap_update PMAPNAME(update) 154 #define pmap_enter PMAPNAME(enter) 155 #define pmap_remove PMAPNAME(remove) 156 #define pmap_kenter_pa PMAPNAME(kenter_pa) 157 #define pmap_kremove PMAPNAME(kremove) 158 #define pmap_extract PMAPNAME(extract) 159 #define pmap_protect PMAPNAME(protect) 160 #define pmap_unwire PMAPNAME(unwire) 161 #define pmap_page_protect PMAPNAME(page_protect) 162 #define pmap_query_bit PMAPNAME(query_bit) 163 #define pmap_clear_bit PMAPNAME(clear_bit) 164 165 #define pmap_activate PMAPNAME(activate) 166 #define pmap_deactivate PMAPNAME(deactivate) 167 168 #define pmap_pinit PMAPNAME(pinit) 169 #define pmap_procwr PMAPNAME(procwr) 170 171 #define pmap_pool PMAPNAME(pool) 172 #define pmap_upvo_pool PMAPNAME(upvo_pool) 173 #define pmap_mpvo_pool PMAPNAME(mpvo_pool) 174 #define pmap_pvo_table PMAPNAME(pvo_table) 175 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB) 176 #define pmap_pte_print PMAPNAME(pte_print) 177 #define pmap_pteg_check PMAPNAME(pteg_check) 178 #define pmap_print_mmruregs PMAPNAME(print_mmuregs) 179 #define pmap_print_pte PMAPNAME(print_pte) 180 #define pmap_pteg_dist 
PMAPNAME(pteg_dist) 181 #endif 182 #if defined(DEBUG) || defined(PMAPCHECK) 183 #define pmap_pvo_verify PMAPNAME(pvo_verify) 184 #define pmapcheck PMAPNAME(check) 185 #endif 186 #if defined(DEBUG) || defined(PMAPDEBUG) 187 #define pmapdebug PMAPNAME(debug) 188 #endif 189 #define pmap_steal_memory PMAPNAME(steal_memory) 190 #define pmap_bootstrap PMAPNAME(bootstrap) 191 #else 192 #define STATIC /* nothing */ 193 #endif /* PMAPNAME */ 194 195 STATIC int pmap_pte_spill(struct pmap *, vaddr_t, bool); 196 STATIC void pmap_real_memory(paddr_t *, psize_t *); 197 STATIC void pmap_init(void); 198 STATIC void pmap_virtual_space(vaddr_t *, vaddr_t *); 199 STATIC pmap_t pmap_create(void); 200 STATIC void pmap_reference(pmap_t); 201 STATIC void pmap_destroy(pmap_t); 202 STATIC void pmap_copy(pmap_t, pmap_t, vaddr_t, vsize_t, vaddr_t); 203 STATIC void pmap_update(pmap_t); 204 STATIC int pmap_enter(pmap_t, vaddr_t, paddr_t, vm_prot_t, u_int); 205 STATIC void pmap_remove(pmap_t, vaddr_t, vaddr_t); 206 STATIC void pmap_kenter_pa(vaddr_t, paddr_t, vm_prot_t, u_int); 207 STATIC void pmap_kremove(vaddr_t, vsize_t); 208 STATIC bool pmap_extract(pmap_t, vaddr_t, paddr_t *); 209 210 STATIC void pmap_protect(pmap_t, vaddr_t, vaddr_t, vm_prot_t); 211 STATIC void pmap_unwire(pmap_t, vaddr_t); 212 STATIC void pmap_page_protect(struct vm_page *, vm_prot_t); 213 STATIC bool pmap_query_bit(struct vm_page *, int); 214 STATIC bool pmap_clear_bit(struct vm_page *, int); 215 216 STATIC void pmap_activate(struct lwp *); 217 STATIC void pmap_deactivate(struct lwp *); 218 219 STATIC void pmap_pinit(pmap_t pm); 220 STATIC void pmap_procwr(struct proc *, vaddr_t, size_t); 221 222 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB) 223 STATIC void pmap_pte_print(volatile struct pte *); 224 STATIC void pmap_pteg_check(void); 225 STATIC void pmap_print_mmuregs(void); 226 STATIC void pmap_print_pte(pmap_t, vaddr_t); 227 STATIC void pmap_pteg_dist(void); 228 #endif 229 #if defined(DEBUG) || defined(PMAPCHECK) 230 STATIC void pmap_pvo_verify(void); 231 #endif 232 STATIC vaddr_t pmap_steal_memory(vsize_t, vaddr_t *, vaddr_t *); 233 STATIC void pmap_bootstrap(paddr_t, paddr_t); 234 235 #ifdef PMAPNAME 236 const struct pmap_ops PMAPNAME(ops) = { 237 .pmapop_pte_spill = pmap_pte_spill, 238 .pmapop_real_memory = pmap_real_memory, 239 .pmapop_init = pmap_init, 240 .pmapop_virtual_space = pmap_virtual_space, 241 .pmapop_create = pmap_create, 242 .pmapop_reference = pmap_reference, 243 .pmapop_destroy = pmap_destroy, 244 .pmapop_copy = pmap_copy, 245 .pmapop_update = pmap_update, 246 .pmapop_enter = pmap_enter, 247 .pmapop_remove = pmap_remove, 248 .pmapop_kenter_pa = pmap_kenter_pa, 249 .pmapop_kremove = pmap_kremove, 250 .pmapop_extract = pmap_extract, 251 .pmapop_protect = pmap_protect, 252 .pmapop_unwire = pmap_unwire, 253 .pmapop_page_protect = pmap_page_protect, 254 .pmapop_query_bit = pmap_query_bit, 255 .pmapop_clear_bit = pmap_clear_bit, 256 .pmapop_activate = pmap_activate, 257 .pmapop_deactivate = pmap_deactivate, 258 .pmapop_pinit = pmap_pinit, 259 .pmapop_procwr = pmap_procwr, 260 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB) 261 .pmapop_pte_print = pmap_pte_print, 262 .pmapop_pteg_check = pmap_pteg_check, 263 .pmapop_print_mmuregs = pmap_print_mmuregs, 264 .pmapop_print_pte = pmap_print_pte, 265 .pmapop_pteg_dist = pmap_pteg_dist, 266 #else 267 .pmapop_pte_print = NULL, 268 .pmapop_pteg_check = NULL, 269 .pmapop_print_mmuregs = NULL, 270 .pmapop_print_pte = NULL, 271 .pmapop_pteg_dist = NULL, 272 #endif 273 #if 
defined(DEBUG) || defined(PMAPCHECK) 274 .pmapop_pvo_verify = pmap_pvo_verify, 275 #else 276 .pmapop_pvo_verify = NULL, 277 #endif 278 .pmapop_steal_memory = pmap_steal_memory, 279 .pmapop_bootstrap = pmap_bootstrap, 280 }; 281 #endif /* !PMAPNAME */ 282 283 /* 284 * The following structure is aligned to 32 bytes 285 */ 286 struct pvo_entry { 287 LIST_ENTRY(pvo_entry) pvo_vlink; /* Link to common virt page */ 288 TAILQ_ENTRY(pvo_entry) pvo_olink; /* Link to overflow entry */ 289 struct pte pvo_pte; /* Prebuilt PTE */ 290 pmap_t pvo_pmap; /* ptr to owning pmap */ 291 vaddr_t pvo_vaddr; /* VA of entry */ 292 #define PVO_PTEGIDX_MASK 0x0007 /* which PTEG slot */ 293 #define PVO_PTEGIDX_VALID 0x0008 /* slot is valid */ 294 #define PVO_WIRED 0x0010 /* PVO entry is wired */ 295 #define PVO_MANAGED 0x0020 /* PVO e. for managed page */ 296 #define PVO_EXECUTABLE 0x0040 /* PVO e. for executable page */ 297 #define PVO_WIRED_P(pvo) ((pvo)->pvo_vaddr & PVO_WIRED) 298 #define PVO_MANAGED_P(pvo) ((pvo)->pvo_vaddr & PVO_MANAGED) 299 #define PVO_EXECUTABLE_P(pvo) ((pvo)->pvo_vaddr & PVO_EXECUTABLE) 300 #define PVO_ENTER_INSERT 0 /* PVO has been removed */ 301 #define PVO_SPILL_UNSET 1 /* PVO has been evicted */ 302 #define PVO_SPILL_SET 2 /* PVO has been spilled */ 303 #define PVO_SPILL_INSERT 3 /* PVO has been inserted */ 304 #define PVO_PMAP_PAGE_PROTECT 4 /* PVO has changed */ 305 #define PVO_PMAP_PROTECT 5 /* PVO has changed */ 306 #define PVO_REMOVE 6 /* PVO has been removed */ 307 #define PVO_WHERE_MASK 15 308 #define PVO_WHERE_SHFT 8 309 } __attribute__ ((aligned (32))); 310 #define PVO_VADDR(pvo) ((pvo)->pvo_vaddr & ~ADDR_POFF) 311 #define PVO_PTEGIDX_GET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK) 312 #define PVO_PTEGIDX_ISSET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID) 313 #define PVO_PTEGIDX_CLR(pvo) \ 314 ((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK))) 315 #define PVO_PTEGIDX_SET(pvo,i) \ 316 ((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID)) 317 #define PVO_WHERE(pvo,w) \ 318 ((pvo)->pvo_vaddr &= ~(PVO_WHERE_MASK << PVO_WHERE_SHFT), \ 319 (pvo)->pvo_vaddr |= ((PVO_ ## w) << PVO_WHERE_SHFT)) 320 321 TAILQ_HEAD(pvo_tqhead, pvo_entry); 322 struct pvo_tqhead *pmap_pvo_table; /* pvo entries by ptegroup index */ 323 static struct pvo_head pmap_pvo_kunmanaged = LIST_HEAD_INITIALIZER(pmap_pvo_kunmanaged); /* list of unmanaged pages */ 324 static struct pvo_head pmap_pvo_unmanaged = LIST_HEAD_INITIALIZER(pmap_pvo_unmanaged); /* list of unmanaged pages */ 325 326 struct pool pmap_pool; /* pool for pmap structures */ 327 struct pool pmap_upvo_pool; /* pool for pvo entries for unmanaged pages */ 328 struct pool pmap_mpvo_pool; /* pool for pvo entries for managed pages */ 329 330 /* 331 * We keep a cache of unmanaged pages to be used for pvo entries for 332 * unmanaged pages. 
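 * The cached pages are kept on the SIMPLEQ free lists declared below,
 * with pmap_upvop_free/pmap_upvop_maxfree (and their mpvop counterparts)
 * tracking the current and peak number of cached pages.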
333 */ 334 struct pvo_page { 335 SIMPLEQ_ENTRY(pvo_page) pvop_link; 336 }; 337 SIMPLEQ_HEAD(pvop_head, pvo_page); 338 static struct pvop_head pmap_upvop_head = SIMPLEQ_HEAD_INITIALIZER(pmap_upvop_head); 339 static struct pvop_head pmap_mpvop_head = SIMPLEQ_HEAD_INITIALIZER(pmap_mpvop_head); 340 static u_long pmap_upvop_free; 341 static u_long pmap_upvop_maxfree; 342 static u_long pmap_mpvop_free; 343 static u_long pmap_mpvop_maxfree; 344 345 static void *pmap_pool_ualloc(struct pool *, int); 346 static void *pmap_pool_malloc(struct pool *, int); 347 348 static void pmap_pool_ufree(struct pool *, void *); 349 static void pmap_pool_mfree(struct pool *, void *); 350 351 static struct pool_allocator pmap_pool_mallocator = { 352 .pa_alloc = pmap_pool_malloc, 353 .pa_free = pmap_pool_mfree, 354 .pa_pagesz = 0, 355 }; 356 357 static struct pool_allocator pmap_pool_uallocator = { 358 .pa_alloc = pmap_pool_ualloc, 359 .pa_free = pmap_pool_ufree, 360 .pa_pagesz = 0, 361 }; 362 363 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB) 364 void pmap_pte_print(volatile struct pte *); 365 void pmap_pteg_check(void); 366 void pmap_pteg_dist(void); 367 void pmap_print_pte(pmap_t, vaddr_t); 368 void pmap_print_mmuregs(void); 369 #endif 370 371 #if defined(DEBUG) || defined(PMAPCHECK) 372 #ifdef PMAPCHECK 373 int pmapcheck = 1; 374 #else 375 int pmapcheck = 0; 376 #endif 377 void pmap_pvo_verify(void); 378 static void pmap_pvo_check(const struct pvo_entry *); 379 #define PMAP_PVO_CHECK(pvo) \ 380 do { \ 381 if (pmapcheck) \ 382 pmap_pvo_check(pvo); \ 383 } while (0) 384 #else 385 #define PMAP_PVO_CHECK(pvo) do { } while (/*CONSTCOND*/0) 386 #endif 387 static int pmap_pte_insert(int, struct pte *); 388 static int pmap_pvo_enter(pmap_t, struct pool *, struct pvo_head *, 389 vaddr_t, paddr_t, register_t, int); 390 static void pmap_pvo_remove(struct pvo_entry *, int, struct pvo_head *); 391 static void pmap_pvo_free(struct pvo_entry *); 392 static void pmap_pvo_free_list(struct pvo_head *); 393 static struct pvo_entry *pmap_pvo_find_va(pmap_t, vaddr_t, int *); 394 static volatile struct pte *pmap_pvo_to_pte(const struct pvo_entry *, int); 395 static struct pvo_entry *pmap_pvo_reclaim(struct pmap *); 396 static void pvo_set_exec(struct pvo_entry *); 397 static void pvo_clear_exec(struct pvo_entry *); 398 399 static void tlbia(void); 400 401 static void pmap_release(pmap_t); 402 static paddr_t pmap_boot_find_memory(psize_t, psize_t, int); 403 404 static uint32_t pmap_pvo_reclaim_nextidx; 405 #ifdef DEBUG 406 static int pmap_pvo_reclaim_debugctr; 407 #endif 408 409 #define VSID_NBPW (sizeof(uint32_t) * 8) 410 static uint32_t pmap_vsid_bitmap[NPMAPS / VSID_NBPW]; 411 412 static int pmap_initialized; 413 414 #if defined(DEBUG) || defined(PMAPDEBUG) 415 #define PMAPDEBUG_BOOT 0x0001 416 #define PMAPDEBUG_PTE 0x0002 417 #define PMAPDEBUG_EXEC 0x0008 418 #define PMAPDEBUG_PVOENTER 0x0010 419 #define PMAPDEBUG_PVOREMOVE 0x0020 420 #define PMAPDEBUG_ACTIVATE 0x0100 421 #define PMAPDEBUG_CREATE 0x0200 422 #define PMAPDEBUG_ENTER 0x1000 423 #define PMAPDEBUG_KENTER 0x2000 424 #define PMAPDEBUG_KREMOVE 0x4000 425 #define PMAPDEBUG_REMOVE 0x8000 426 427 unsigned int pmapdebug = 0; 428 429 # define DPRINTF(x, ...) printf(x, __VA_ARGS__) 430 # define DPRINTFN(n, x, ...) do if (pmapdebug & PMAPDEBUG_ ## n) printf(x, __VA_ARGS__); while (0) 431 #else 432 # define DPRINTF(x, ...) do { } while (0) 433 # define DPRINTFN(n, x, ...) 
do { } while (0) 434 #endif 435 436 437 #ifdef PMAPCOUNTERS 438 /* 439 * From pmap_subr.c 440 */ 441 extern struct evcnt pmap_evcnt_mappings; 442 extern struct evcnt pmap_evcnt_unmappings; 443 444 extern struct evcnt pmap_evcnt_kernel_mappings; 445 extern struct evcnt pmap_evcnt_kernel_unmappings; 446 447 extern struct evcnt pmap_evcnt_mappings_replaced; 448 449 extern struct evcnt pmap_evcnt_exec_mappings; 450 extern struct evcnt pmap_evcnt_exec_cached; 451 452 extern struct evcnt pmap_evcnt_exec_synced; 453 extern struct evcnt pmap_evcnt_exec_synced_clear_modify; 454 extern struct evcnt pmap_evcnt_exec_synced_pvo_remove; 455 456 extern struct evcnt pmap_evcnt_exec_uncached_page_protect; 457 extern struct evcnt pmap_evcnt_exec_uncached_clear_modify; 458 extern struct evcnt pmap_evcnt_exec_uncached_zero_page; 459 extern struct evcnt pmap_evcnt_exec_uncached_copy_page; 460 extern struct evcnt pmap_evcnt_exec_uncached_pvo_remove; 461 462 extern struct evcnt pmap_evcnt_updates; 463 extern struct evcnt pmap_evcnt_collects; 464 extern struct evcnt pmap_evcnt_copies; 465 466 extern struct evcnt pmap_evcnt_ptes_spilled; 467 extern struct evcnt pmap_evcnt_ptes_unspilled; 468 extern struct evcnt pmap_evcnt_ptes_evicted; 469 470 extern struct evcnt pmap_evcnt_ptes_primary[8]; 471 extern struct evcnt pmap_evcnt_ptes_secondary[8]; 472 extern struct evcnt pmap_evcnt_ptes_removed; 473 extern struct evcnt pmap_evcnt_ptes_changed; 474 extern struct evcnt pmap_evcnt_pvos_reclaimed; 475 extern struct evcnt pmap_evcnt_pvos_failed; 476 477 extern struct evcnt pmap_evcnt_zeroed_pages; 478 extern struct evcnt pmap_evcnt_copied_pages; 479 extern struct evcnt pmap_evcnt_idlezeroed_pages; 480 481 #define PMAPCOUNT(ev) ((pmap_evcnt_ ## ev).ev_count++) 482 #define PMAPCOUNT2(ev) ((ev).ev_count++) 483 #else 484 #define PMAPCOUNT(ev) ((void) 0) 485 #define PMAPCOUNT2(ev) ((void) 0) 486 #endif 487 488 #define TLBIE(va) __asm volatile("tlbie %0" :: "r"(va)) 489 490 /* XXXSL: this needs to be moved to assembler */ 491 #define TLBIEL(va) __asm __volatile("tlbie %0" :: "r"(va)) 492 493 #ifdef MD_TLBSYNC 494 #define TLBSYNC() MD_TLBSYNC() 495 #else 496 #define TLBSYNC() __asm volatile("tlbsync") 497 #endif 498 #define SYNC() __asm volatile("sync") 499 #define EIEIO() __asm volatile("eieio") 500 #define DCBST(va) __asm __volatile("dcbst 0,%0" :: "r"(va)) 501 #define MFMSR() mfmsr() 502 #define MTMSR(psl) mtmsr(psl) 503 #define MFPVR() mfpvr() 504 #define MFSRIN(va) mfsrin(va) 505 #define MFTB() mfrtcltbl() 506 507 #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE) 508 static inline register_t 509 mfsrin(vaddr_t va) 510 { 511 register_t sr; 512 __asm volatile ("mfsrin %0,%1" : "=r"(sr) : "r"(va)); 513 return sr; 514 } 515 #endif /* PMAP_OEA*/ 516 517 #if defined (PMAP_OEA64_BRIDGE) 518 extern void mfmsr64 (register64_t *result); 519 #endif /* PMAP_OEA64_BRIDGE */ 520 521 #define PMAP_LOCK() KERNEL_LOCK(1, NULL) 522 #define PMAP_UNLOCK() KERNEL_UNLOCK_ONE(NULL) 523 524 static inline register_t 525 pmap_interrupts_off(void) 526 { 527 register_t msr = MFMSR(); 528 if (msr & PSL_EE) 529 MTMSR(msr & ~PSL_EE); 530 return msr; 531 } 532 533 static void 534 pmap_interrupts_restore(register_t msr) 535 { 536 if (msr & PSL_EE) 537 MTMSR(msr); 538 } 539 540 static inline u_int32_t 541 mfrtcltbl(void) 542 { 543 #ifdef PPC_OEA601 544 if ((MFPVR() >> 16) == MPC601) 545 return (mfrtcl() >> 7); 546 else 547 #endif 548 return (mftbl()); 549 } 550 551 /* 552 * These small routines may have to be replaced, 553 * if/when we support processors 
other than the 604.
 */

void
tlbia(void)
{
	char *i;

	SYNC();
#if defined(PMAP_OEA)
	/*
	 * Why not use "tlbia"? Because not all processors implement it.
	 *
	 * This needs to be a per-CPU callback to do the appropriate thing
	 * for the CPU. XXX
	 */
	for (i = 0; i < (char *)0x00040000; i += 0x00001000) {
		TLBIE(i);
		EIEIO();
		SYNC();
	}
#elif defined (PMAP_OEA64) || defined (PMAP_OEA64_BRIDGE)
	/* This is specifically for the 970, 970UM v1.6 pp. 140. */
	for (i = 0; i <= (char *)0xFF000; i += 0x00001000) {
		TLBIEL(i);
		EIEIO();
		SYNC();
	}
#endif
	TLBSYNC();
	SYNC();
}

static inline register_t
va_to_vsid(const struct pmap *pm, vaddr_t addr)
{
#if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
	return (pm->pm_sr[addr >> ADDR_SR_SHFT] & SR_VSID) >> SR_VSID_SHFT;
#else /* PMAP_OEA64 */
#if 0
	const struct ste *ste;
	register_t hash;
	int i;

	hash = (addr >> ADDR_ESID_SHFT) & ADDR_ESID_HASH;

	/*
	 * Try the primary group first
	 */
	ste = pm->pm_stes[hash].stes;
	for (i = 0; i < 8; i++, ste++) {
		if ((ste->ste_hi & STE_V) &&
		    (addr & ~(ADDR_POFF|ADDR_PIDX)) == (ste->ste_hi & STE_ESID))
			return ste;
	}

	/*
	 * Then the secondary group.
	 */
	ste = pm->pm_stes[hash ^ ADDR_ESID_HASH].stes;
	for (i = 0; i < 8; i++, ste++) {
		if ((ste->ste_hi & STE_V) &&
		    (addr & ~(ADDR_POFF|ADDR_PIDX)) == (ste->ste_hi & STE_ESID))
			return addr;
	}

	return NULL;
#else
	/*
	 * Rather than searching the STE groups for the VSID, we know
	 * how we generate that from the ESID and so do that.
	 */
	return VSID_MAKE(addr >> ADDR_SR_SHFT, pm->pm_vsid) >> SR_VSID_SHFT;
#endif
#endif /* PMAP_OEA */
}

static inline register_t
va_to_pteg(const struct pmap *pm, vaddr_t addr)
{
	register_t hash;

	hash = va_to_vsid(pm, addr) ^ ((addr & ADDR_PIDX) >> ADDR_PIDX_SHFT);
	return hash & pmap_pteg_mask;
}

#if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
/*
 * Given a PTE in the page table, calculate the VADDR that hashes to it.
 * The only bit of magic is that the top 4 bits of the address don't
 * technically exist in the PTE.  But we know we reserved 4 bits of the
 * VSID for it so that's how we get it.
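 *
 * Concretely, va_to_pteg() hashes (VSID ^ page index) & pmap_pteg_mask,
 * so XORing the PTEG index (recovered from the PTE's address, undoing
 * the PTE_HID complement when set) back into the VSID field of pte_hi
 * yields the page-index bits; the API bits in pte_hi supply the next
 * field, and VSID_TO_SR() recovers the top 4 segment bits.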
645 */ 646 static vaddr_t 647 pmap_pte_to_va(volatile const struct pte *pt) 648 { 649 vaddr_t va; 650 uintptr_t ptaddr = (uintptr_t) pt; 651 652 if (pt->pte_hi & PTE_HID) 653 ptaddr ^= (pmap_pteg_mask * sizeof(struct pteg)); 654 655 /* PPC Bits 10-19 PPC64 Bits 42-51 */ 656 #if defined(PMAP_OEA) 657 va = ((pt->pte_hi >> PTE_VSID_SHFT) ^ (ptaddr / sizeof(struct pteg))) & 0x3ff; 658 #elif defined (PMAP_OEA64) || defined (PMAP_OEA64_BRIDGE) 659 va = ((pt->pte_hi >> PTE_VSID_SHFT) ^ (ptaddr / sizeof(struct pteg))) & 0x7ff; 660 #endif 661 va <<= ADDR_PIDX_SHFT; 662 663 /* PPC Bits 4-9 PPC64 Bits 36-41 */ 664 va |= (pt->pte_hi & PTE_API) << ADDR_API_SHFT; 665 666 #if defined(PMAP_OEA64) 667 /* PPC63 Bits 0-35 */ 668 /* va |= VSID_TO_SR(pt->pte_hi >> PTE_VSID_SHFT) << ADDR_SR_SHFT; */ 669 #elif defined(PMAP_OEA) || defined(PMAP_OEA64_BRIDGE) 670 /* PPC Bits 0-3 */ 671 va |= VSID_TO_SR(pt->pte_hi >> PTE_VSID_SHFT) << ADDR_SR_SHFT; 672 #endif 673 674 return va; 675 } 676 #endif 677 678 static inline struct pvo_head * 679 pa_to_pvoh(paddr_t pa, struct vm_page **pg_p) 680 { 681 struct vm_page *pg; 682 struct vm_page_md *md; 683 684 pg = PHYS_TO_VM_PAGE(pa); 685 if (pg_p != NULL) 686 *pg_p = pg; 687 if (pg == NULL) 688 return &pmap_pvo_unmanaged; 689 md = VM_PAGE_TO_MD(pg); 690 return &md->mdpg_pvoh; 691 } 692 693 static inline struct pvo_head * 694 vm_page_to_pvoh(struct vm_page *pg) 695 { 696 struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 697 698 return &md->mdpg_pvoh; 699 } 700 701 702 static inline void 703 pmap_attr_clear(struct vm_page *pg, int ptebit) 704 { 705 struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 706 707 md->mdpg_attrs &= ~ptebit; 708 } 709 710 static inline int 711 pmap_attr_fetch(struct vm_page *pg) 712 { 713 struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 714 715 return md->mdpg_attrs; 716 } 717 718 static inline void 719 pmap_attr_save(struct vm_page *pg, int ptebit) 720 { 721 struct vm_page_md * const md = VM_PAGE_TO_MD(pg); 722 723 md->mdpg_attrs |= ptebit; 724 } 725 726 static inline int 727 pmap_pte_compare(const volatile struct pte *pt, const struct pte *pvo_pt) 728 { 729 if (pt->pte_hi == pvo_pt->pte_hi 730 #if 0 731 && ((pt->pte_lo ^ pvo_pt->pte_lo) & 732 ~(PTE_REF|PTE_CHG)) == 0 733 #endif 734 ) 735 return 1; 736 return 0; 737 } 738 739 static inline void 740 pmap_pte_create(struct pte *pt, const struct pmap *pm, vaddr_t va, register_t pte_lo) 741 { 742 /* 743 * Construct the PTE. Default to IMB initially. Valid bit 744 * only gets set when the real pte is set in memory. 745 * 746 * Note: Don't set the valid bit for correct operation of tlb update. 
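	 * PTE_VALID is set later by pmap_pte_set(), which writes pte_lo
	 * first, issues an eieio, and only then writes pte_hi, following
	 * the architected PTE update sequence.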
747 */ 748 #if defined(PMAP_OEA) 749 pt->pte_hi = (va_to_vsid(pm, va) << PTE_VSID_SHFT) 750 | (((va & ADDR_PIDX) >> (ADDR_API_SHFT - PTE_API_SHFT)) & PTE_API); 751 pt->pte_lo = pte_lo; 752 #elif defined (PMAP_OEA64_BRIDGE) || defined (PMAP_OEA64) 753 pt->pte_hi = ((u_int64_t)va_to_vsid(pm, va) << PTE_VSID_SHFT) 754 | (((va & ADDR_PIDX) >> (ADDR_API_SHFT - PTE_API_SHFT)) & PTE_API); 755 pt->pte_lo = (u_int64_t) pte_lo; 756 #endif /* PMAP_OEA */ 757 } 758 759 static inline void 760 pmap_pte_synch(volatile struct pte *pt, struct pte *pvo_pt) 761 { 762 pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF|PTE_CHG); 763 } 764 765 static inline void 766 pmap_pte_clear(volatile struct pte *pt, vaddr_t va, int ptebit) 767 { 768 /* 769 * As shown in Section 7.6.3.2.3 770 */ 771 pt->pte_lo &= ~ptebit; 772 TLBIE(va); 773 SYNC(); 774 EIEIO(); 775 TLBSYNC(); 776 SYNC(); 777 #ifdef MULTIPROCESSOR 778 DCBST(pt); 779 #endif 780 } 781 782 static inline void 783 pmap_pte_set(volatile struct pte *pt, struct pte *pvo_pt) 784 { 785 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 786 if (pvo_pt->pte_hi & PTE_VALID) 787 panic("pte_set: setting an already valid pte %p", pvo_pt); 788 #endif 789 pvo_pt->pte_hi |= PTE_VALID; 790 791 /* 792 * Update the PTE as defined in section 7.6.3.1 793 * Note that the REF/CHG bits are from pvo_pt and thus should 794 * have been saved so this routine can restore them (if desired). 795 */ 796 pt->pte_lo = pvo_pt->pte_lo; 797 EIEIO(); 798 pt->pte_hi = pvo_pt->pte_hi; 799 TLBSYNC(); 800 SYNC(); 801 #ifdef MULTIPROCESSOR 802 DCBST(pt); 803 #endif 804 pmap_pte_valid++; 805 } 806 807 static inline void 808 pmap_pte_unset(volatile struct pte *pt, struct pte *pvo_pt, vaddr_t va) 809 { 810 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 811 if ((pvo_pt->pte_hi & PTE_VALID) == 0) 812 panic("pte_unset: attempt to unset an inactive pte#1 %p/%p", pvo_pt, pt); 813 if ((pt->pte_hi & PTE_VALID) == 0) 814 panic("pte_unset: attempt to unset an inactive pte#2 %p/%p", pvo_pt, pt); 815 #endif 816 817 pvo_pt->pte_hi &= ~PTE_VALID; 818 /* 819 * Force the ref & chg bits back into the PTEs. 820 */ 821 SYNC(); 822 /* 823 * Invalidate the pte ... (Section 7.6.3.3) 824 */ 825 pt->pte_hi &= ~PTE_VALID; 826 SYNC(); 827 TLBIE(va); 828 SYNC(); 829 EIEIO(); 830 TLBSYNC(); 831 SYNC(); 832 /* 833 * Save the ref & chg bits ... 834 */ 835 pmap_pte_synch(pt, pvo_pt); 836 pmap_pte_valid--; 837 } 838 839 static inline void 840 pmap_pte_change(volatile struct pte *pt, struct pte *pvo_pt, vaddr_t va) 841 { 842 /* 843 * Invalidate the PTE 844 */ 845 pmap_pte_unset(pt, pvo_pt, va); 846 pmap_pte_set(pt, pvo_pt); 847 } 848 849 /* 850 * Try to insert the PTE @ *pvo_pt into the pmap_pteg_table at ptegidx 851 * (either primary or secondary location). 852 * 853 * Note: both the destination and source PTEs must not have PTE_VALID set. 854 */ 855 856 static int 857 pmap_pte_insert(int ptegidx, struct pte *pvo_pt) 858 { 859 volatile struct pte *pt; 860 int i; 861 862 #if defined(DEBUG) 863 DPRINTFN(PTE, "pmap_pte_insert: idx %#x, pte %#" _PRIxpte " %#" _PRIxpte "\n", 864 ptegidx, pvo_pt->pte_hi, pvo_pt->pte_lo); 865 #endif 866 /* 867 * First try primary hash. 868 */ 869 for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) { 870 if ((pt->pte_hi & PTE_VALID) == 0) { 871 pvo_pt->pte_hi &= ~PTE_HID; 872 pmap_pte_set(pt, pvo_pt); 873 return i; 874 } 875 } 876 877 /* 878 * Now try secondary hash. 
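	 * The secondary PTEG is addressed by the complemented hash
	 * (ptegidx ^ pmap_pteg_mask), and a PTE placed there must have
	 * PTE_HID set so the hardware searches it with the same
	 * complemented hash.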
	 */
	ptegidx ^= pmap_pteg_mask;
	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & PTE_VALID) == 0) {
			pvo_pt->pte_hi |= PTE_HID;
			pmap_pte_set(pt, pvo_pt);
			return i;
		}
	}
	return -1;
}

/*
 * Spill handler.
 *
 * Tries to spill a page table entry from the overflow area.
 * This runs in either real mode (if dealing with an exception spill)
 * or virtual mode when dealing with manually spilling one of the
 * kernel's pte entries.  In either case, interrupts are already
 * disabled.
 */

int
pmap_pte_spill(struct pmap *pm, vaddr_t addr, bool exec)
{
	struct pvo_entry *source_pvo, *victim_pvo, *next_pvo;
	struct pvo_entry *pvo;
	/* XXX: gcc -- vpvoh is always set at either *1* or *2* */
	struct pvo_tqhead *pvoh, *vpvoh = NULL;
	int ptegidx, i, j;
	volatile struct pteg *pteg;
	volatile struct pte *pt;

	PMAP_LOCK();

	ptegidx = va_to_pteg(pm, addr);

	/*
	 * Have to substitute some entry.  Use the primary hash for this.
	 * Use the low bits of the timebase as a random generator.  Make
	 * sure we are not picking a kernel pte for replacement.
	 */
	pteg = &pmap_pteg_table[ptegidx];
	i = MFTB() & 7;
	for (j = 0; j < 8; j++) {
		pt = &pteg->pt[i];
		if ((pt->pte_hi & PTE_VALID) == 0)
			break;
		if (VSID_TO_HASH((pt->pte_hi & PTE_VSID) >> PTE_VSID_SHFT)
		    < PHYSMAP_VSIDBITS)
			break;
		i = (i + 1) & 7;
	}
	KASSERT(j < 8);

	source_pvo = NULL;
	victim_pvo = NULL;
	pvoh = &pmap_pvo_table[ptegidx];
	TAILQ_FOREACH(pvo, pvoh, pvo_olink) {

		/*
		 * We need to find the pvo entry for this address...
		 */
		PMAP_PVO_CHECK(pvo);		/* sanity check */

		/*
		 * If we haven't found the source and we come to a PVO with
		 * a valid PTE, then we know we can't find it because all
		 * evicted PVOs are always first in the list.
		 */
		if (source_pvo == NULL && (pvo->pvo_pte.pte_hi & PTE_VALID))
			break;
		if (source_pvo == NULL && pm == pvo->pvo_pmap &&
		    addr == PVO_VADDR(pvo)) {

			/*
			 * Now we have found the entry to be spilled into the
			 * pteg.  Attempt to insert it into the page table.
			 */
			j = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
			if (j >= 0) {
				PVO_PTEGIDX_SET(pvo, j);
				PMAP_PVO_CHECK(pvo);	/* sanity check */
				PVO_WHERE(pvo, SPILL_INSERT);
				pvo->pvo_pmap->pm_evictions--;
				PMAPCOUNT(ptes_spilled);
				PMAPCOUNT2(((pvo->pvo_pte.pte_hi & PTE_HID)
				    ? pmap_evcnt_ptes_secondary
				    : pmap_evcnt_ptes_primary)[j]);

				/*
				 * Since we keep the evicted entries at the
				 * front of the PVO list, we need to move this
				 * (now resident) PVO after the evicted
				 * entries.
				 */
				next_pvo = TAILQ_NEXT(pvo, pvo_olink);

				/*
				 * If we don't have to move (either we were the
				 * last entry or the next entry was valid),
				 * don't change our position.  Otherwise
				 * move ourselves to the tail of the queue.
				 */
				if (next_pvo != NULL &&
				    !(next_pvo->pvo_pte.pte_hi & PTE_VALID)) {
					TAILQ_REMOVE(pvoh, pvo, pvo_olink);
					TAILQ_INSERT_TAIL(pvoh, pvo, pvo_olink);
				}
				PMAP_UNLOCK();
				return 1;
			}
			source_pvo = pvo;
			if (exec && !PVO_EXECUTABLE_P(source_pvo)) {
				PMAP_UNLOCK();
				return 0;
			}
			if (victim_pvo != NULL)
				break;
		}

		/*
		 * We also need the pvo entry of the victim we are replacing
		 * so save the R & C bits of the PTE.
		 */
		if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL &&
		    pmap_pte_compare(pt, &pvo->pvo_pte)) {
			vpvoh = pvoh;			/* *1* */
			victim_pvo = pvo;
			if (source_pvo != NULL)
				break;
		}
	}

	if (source_pvo == NULL) {
		PMAPCOUNT(ptes_unspilled);
		PMAP_UNLOCK();
		return 0;
	}

	if (victim_pvo == NULL) {
		if ((pt->pte_hi & PTE_HID) == 0)
			panic("pmap_pte_spill: victim p-pte (%p) has "
			    "no pvo entry!", pt);

		/*
		 * If this is a secondary PTE, we need to search
		 * its primary pvo bucket for the matching PVO.
		 */
		vpvoh = &pmap_pvo_table[ptegidx ^ pmap_pteg_mask]; /* *2* */
		TAILQ_FOREACH(pvo, vpvoh, pvo_olink) {
			PMAP_PVO_CHECK(pvo);		/* sanity check */

			/*
			 * We also need the pvo entry of the victim we are
			 * replacing so save the R & C bits of the PTE.
			 */
			if (pmap_pte_compare(pt, &pvo->pvo_pte)) {
				victim_pvo = pvo;
				break;
			}
		}
		if (victim_pvo == NULL)
			panic("pmap_pte_spill: victim s-pte (%p) has "
			    "no pvo entry!", pt);
	}

	/*
	 * The victim should not be a kernel PVO/PTE entry.
	 */
	KASSERT(victim_pvo->pvo_pmap != pmap_kernel());
	KASSERT(PVO_PTEGIDX_ISSET(victim_pvo));
	KASSERT(PVO_PTEGIDX_GET(victim_pvo) == i);

	/*
	 * We are invalidating the TLB entry for the EA we are replacing
	 * even though it's valid; if we don't, we lose any ref/chg bit
	 * changes contained in the TLB entry.
	 */
	source_pvo->pvo_pte.pte_hi &= ~PTE_HID;

	/*
	 * To enforce the PVO list ordering constraint that all
	 * evicted entries should come before all valid entries,
	 * move the source PVO to the tail of its list and the
	 * victim PVO to the head of its list (which might not be
	 * the same list, if the victim was using the secondary hash).
	 */
	TAILQ_REMOVE(pvoh, source_pvo, pvo_olink);
	TAILQ_INSERT_TAIL(pvoh, source_pvo, pvo_olink);
	TAILQ_REMOVE(vpvoh, victim_pvo, pvo_olink);
	TAILQ_INSERT_HEAD(vpvoh, victim_pvo, pvo_olink);
	pmap_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr);
	pmap_pte_set(pt, &source_pvo->pvo_pte);
	victim_pvo->pvo_pmap->pm_evictions++;
	source_pvo->pvo_pmap->pm_evictions--;
	PVO_WHERE(victim_pvo, SPILL_UNSET);
	PVO_WHERE(source_pvo, SPILL_SET);

	PVO_PTEGIDX_CLR(victim_pvo);
	PVO_PTEGIDX_SET(source_pvo, i);
	PMAPCOUNT2(pmap_evcnt_ptes_primary[i]);
	PMAPCOUNT(ptes_spilled);
	PMAPCOUNT(ptes_evicted);
	PMAPCOUNT(ptes_removed);

	PMAP_PVO_CHECK(victim_pvo);
	PMAP_PVO_CHECK(source_pvo);

	PMAP_UNLOCK();
	return 1;
}

/*
 * Restrict the given range to physical memory.
 */
void
pmap_real_memory(paddr_t *start, psize_t *size)
{
	struct mem_region *mp;

	for (mp = mem; mp->size; mp++) {
		if (*start + *size > mp->start
		    && *start < mp->start + mp->size) {
			if (*start < mp->start) {
				*size -= mp->start - *start;
				*start = mp->start;
			}
			if (*start + *size > mp->start + mp->size)
				*size = mp->start + mp->size - *start;
			return;
		}
	}
	*size = 0;
}

/*
 * Initialize anything else for pmap handling.
 * Called during vm_init().
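 *
 * Until this has run, pmap_pvo_enter() is fed PVOs from the bootstrap
 * pmap_upvo_pool only; afterwards managed mappings are allocated from
 * the pmap_mpvo_pool created here.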
1118 */ 1119 void 1120 pmap_init(void) 1121 { 1122 pool_init(&pmap_mpvo_pool, sizeof(struct pvo_entry), 1123 sizeof(struct pvo_entry), 0, 0, "pmap_mpvopl", 1124 &pmap_pool_mallocator, IPL_NONE); 1125 1126 pool_setlowat(&pmap_mpvo_pool, 1008); 1127 1128 pmap_initialized = 1; 1129 1130 } 1131 1132 /* 1133 * How much virtual space does the kernel get? 1134 */ 1135 void 1136 pmap_virtual_space(vaddr_t *start, vaddr_t *end) 1137 { 1138 /* 1139 * For now, reserve one segment (minus some overhead) for kernel 1140 * virtual memory 1141 */ 1142 *start = VM_MIN_KERNEL_ADDRESS; 1143 *end = VM_MAX_KERNEL_ADDRESS; 1144 } 1145 1146 /* 1147 * Allocate, initialize, and return a new physical map. 1148 */ 1149 pmap_t 1150 pmap_create(void) 1151 { 1152 pmap_t pm; 1153 1154 pm = pool_get(&pmap_pool, PR_WAITOK); 1155 KASSERT((vaddr_t)pm < VM_MIN_KERNEL_ADDRESS); 1156 memset((void *)pm, 0, sizeof *pm); 1157 pmap_pinit(pm); 1158 1159 DPRINTFN(CREATE, "pmap_create: pm %p:\n" 1160 "\t%#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr 1161 " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr "\n" 1162 "\t%#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr 1163 " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr "\n", 1164 pm, 1165 pm->pm_sr[0], pm->pm_sr[1], 1166 pm->pm_sr[2], pm->pm_sr[3], 1167 pm->pm_sr[4], pm->pm_sr[5], 1168 pm->pm_sr[6], pm->pm_sr[7], 1169 pm->pm_sr[8], pm->pm_sr[9], 1170 pm->pm_sr[10], pm->pm_sr[11], 1171 pm->pm_sr[12], pm->pm_sr[13], 1172 pm->pm_sr[14], pm->pm_sr[15]); 1173 return pm; 1174 } 1175 1176 /* 1177 * Initialize a preallocated and zeroed pmap structure. 1178 */ 1179 void 1180 pmap_pinit(pmap_t pm) 1181 { 1182 register_t entropy = MFTB(); 1183 register_t mask; 1184 int i; 1185 1186 /* 1187 * Allocate some segment registers for this pmap. 1188 */ 1189 pm->pm_refs = 1; 1190 PMAP_LOCK(); 1191 for (i = 0; i < NPMAPS; i += VSID_NBPW) { 1192 static register_t pmap_vsidcontext; 1193 register_t hash; 1194 unsigned int n; 1195 1196 /* Create a new value by multiplying by a prime adding in 1197 * entropy from the timebase register. This is to make the 1198 * VSID more random so that the PT Hash function collides 1199 * less often. (note that the prime causes gcc to do shifts 1200 * instead of a multiply) 1201 */ 1202 pmap_vsidcontext = (pmap_vsidcontext * 0x1105) + entropy; 1203 hash = pmap_vsidcontext & (NPMAPS - 1); 1204 if (hash == 0) { /* 0 is special, avoid it */ 1205 entropy += 0xbadf00d; 1206 continue; 1207 } 1208 n = hash >> 5; 1209 mask = 1L << (hash & (VSID_NBPW-1)); 1210 hash = pmap_vsidcontext; 1211 if (pmap_vsid_bitmap[n] & mask) { /* collision? */ 1212 /* anything free in this bucket? */ 1213 if (~pmap_vsid_bitmap[n] == 0) { 1214 entropy = hash ^ (hash >> 16); 1215 continue; 1216 } 1217 i = ffs(~pmap_vsid_bitmap[n]) - 1; 1218 mask = 1L << i; 1219 hash &= ~(VSID_NBPW-1); 1220 hash |= i; 1221 } 1222 hash &= PTE_VSID >> PTE_VSID_SHFT; 1223 pmap_vsid_bitmap[n] |= mask; 1224 pm->pm_vsid = hash; 1225 #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE) 1226 for (i = 0; i < 16; i++) 1227 pm->pm_sr[i] = VSID_MAKE(i, hash) | SR_PRKEY | 1228 SR_NOEXEC; 1229 #endif 1230 PMAP_UNLOCK(); 1231 return; 1232 } 1233 PMAP_UNLOCK(); 1234 panic("pmap_pinit: out of segments"); 1235 } 1236 1237 /* 1238 * Add a reference to the given pmap. 1239 */ 1240 void 1241 pmap_reference(pmap_t pm) 1242 { 1243 atomic_inc_uint(&pm->pm_refs); 1244 } 1245 1246 /* 1247 * Retire the given pmap from service. 1248 * Should only be called if the map contains no valid mappings. 
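 * Dropping the last reference frees the structure back to pmap_pool
 * and releases the pmap's VSID via pmap_release().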
1249 */ 1250 void 1251 pmap_destroy(pmap_t pm) 1252 { 1253 if (atomic_dec_uint_nv(&pm->pm_refs) == 0) { 1254 pmap_release(pm); 1255 pool_put(&pmap_pool, pm); 1256 } 1257 } 1258 1259 /* 1260 * Release any resources held by the given physical map. 1261 * Called when a pmap initialized by pmap_pinit is being released. 1262 */ 1263 void 1264 pmap_release(pmap_t pm) 1265 { 1266 int idx, mask; 1267 1268 KASSERT(pm->pm_stats.resident_count == 0); 1269 KASSERT(pm->pm_stats.wired_count == 0); 1270 1271 PMAP_LOCK(); 1272 if (pm->pm_sr[0] == 0) 1273 panic("pmap_release"); 1274 idx = pm->pm_vsid & (NPMAPS-1); 1275 mask = 1 << (idx % VSID_NBPW); 1276 idx /= VSID_NBPW; 1277 1278 KASSERT(pmap_vsid_bitmap[idx] & mask); 1279 pmap_vsid_bitmap[idx] &= ~mask; 1280 PMAP_UNLOCK(); 1281 } 1282 1283 /* 1284 * Copy the range specified by src_addr/len 1285 * from the source map to the range dst_addr/len 1286 * in the destination map. 1287 * 1288 * This routine is only advisory and need not do anything. 1289 */ 1290 void 1291 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr, 1292 vsize_t len, vaddr_t src_addr) 1293 { 1294 PMAPCOUNT(copies); 1295 } 1296 1297 /* 1298 * Require that all active physical maps contain no 1299 * incorrect entries NOW. 1300 */ 1301 void 1302 pmap_update(struct pmap *pmap) 1303 { 1304 PMAPCOUNT(updates); 1305 TLBSYNC(); 1306 } 1307 1308 static inline int 1309 pmap_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx) 1310 { 1311 int pteidx; 1312 /* 1313 * We can find the actual pte entry without searching by 1314 * grabbing the PTEG index from 3 unused bits in pte_lo[11:9] 1315 * and by noticing the HID bit. 1316 */ 1317 pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo); 1318 if (pvo->pvo_pte.pte_hi & PTE_HID) 1319 pteidx ^= pmap_pteg_mask * 8; 1320 return pteidx; 1321 } 1322 1323 volatile struct pte * 1324 pmap_pvo_to_pte(const struct pvo_entry *pvo, int pteidx) 1325 { 1326 volatile struct pte *pt; 1327 1328 #if !defined(DIAGNOSTIC) && !defined(DEBUG) && !defined(PMAPCHECK) 1329 if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) 1330 return NULL; 1331 #endif 1332 1333 /* 1334 * If we haven't been supplied the ptegidx, calculate it. 
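	 * The index encodes both pieces of information: (PTEG index * 8)
	 * plus the slot recorded in the PVO, with the PTEG index flipped
	 * to the secondary group when the prebuilt PTE has PTE_HID set
	 * (see pmap_pvo_pte_index() above).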
1335 */ 1336 if (pteidx == -1) { 1337 int ptegidx; 1338 ptegidx = va_to_pteg(pvo->pvo_pmap, pvo->pvo_vaddr); 1339 pteidx = pmap_pvo_pte_index(pvo, ptegidx); 1340 } 1341 1342 pt = &pmap_pteg_table[pteidx >> 3].pt[pteidx & 7]; 1343 1344 #if !defined(DIAGNOSTIC) && !defined(DEBUG) && !defined(PMAPCHECK) 1345 return pt; 1346 #else 1347 if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) { 1348 panic("pmap_pvo_to_pte: pvo %p: has valid pte in " 1349 "pvo but no valid pte index", pvo); 1350 } 1351 if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) { 1352 panic("pmap_pvo_to_pte: pvo %p: has valid pte index in " 1353 "pvo but no valid pte", pvo); 1354 } 1355 1356 if ((pt->pte_hi ^ (pvo->pvo_pte.pte_hi & ~PTE_VALID)) == PTE_VALID) { 1357 if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) { 1358 #if defined(DEBUG) || defined(PMAPCHECK) 1359 pmap_pte_print(pt); 1360 #endif 1361 panic("pmap_pvo_to_pte: pvo %p: has valid pte in " 1362 "pmap_pteg_table %p but invalid in pvo", 1363 pvo, pt); 1364 } 1365 if (((pt->pte_lo ^ pvo->pvo_pte.pte_lo) & ~(PTE_CHG|PTE_REF)) != 0) { 1366 #if defined(DEBUG) || defined(PMAPCHECK) 1367 pmap_pte_print(pt); 1368 #endif 1369 panic("pmap_pvo_to_pte: pvo %p: pvo pte does " 1370 "not match pte %p in pmap_pteg_table", 1371 pvo, pt); 1372 } 1373 return pt; 1374 } 1375 1376 if (pvo->pvo_pte.pte_hi & PTE_VALID) { 1377 #if defined(DEBUG) || defined(PMAPCHECK) 1378 pmap_pte_print(pt); 1379 #endif 1380 panic("pmap_pvo_to_pte: pvo %p: has nomatching pte %p in " 1381 "pmap_pteg_table but valid in pvo", pvo, pt); 1382 } 1383 return NULL; 1384 #endif /* !(!DIAGNOSTIC && !DEBUG && !PMAPCHECK) */ 1385 } 1386 1387 struct pvo_entry * 1388 pmap_pvo_find_va(pmap_t pm, vaddr_t va, int *pteidx_p) 1389 { 1390 struct pvo_entry *pvo; 1391 int ptegidx; 1392 1393 va &= ~ADDR_POFF; 1394 ptegidx = va_to_pteg(pm, va); 1395 1396 TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { 1397 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 1398 if ((uintptr_t) pvo >= SEGMENT_LENGTH) 1399 panic("pmap_pvo_find_va: invalid pvo %p on " 1400 "list %#x (%p)", pvo, ptegidx, 1401 &pmap_pvo_table[ptegidx]); 1402 #endif 1403 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { 1404 if (pteidx_p) 1405 *pteidx_p = pmap_pvo_pte_index(pvo, ptegidx); 1406 return pvo; 1407 } 1408 } 1409 if ((pm == pmap_kernel()) && (va < SEGMENT_LENGTH)) 1410 panic("%s: returning NULL for %s pmap, va: %#" _PRIxva "\n", 1411 __func__, (pm == pmap_kernel() ? 
"kernel" : "user"), va); 1412 return NULL; 1413 } 1414 1415 #if defined(DEBUG) || defined(PMAPCHECK) 1416 void 1417 pmap_pvo_check(const struct pvo_entry *pvo) 1418 { 1419 struct pvo_head *pvo_head; 1420 struct pvo_entry *pvo0; 1421 volatile struct pte *pt; 1422 int failed = 0; 1423 1424 PMAP_LOCK(); 1425 1426 if ((uintptr_t)(pvo+1) >= SEGMENT_LENGTH) 1427 panic("pmap_pvo_check: pvo %p: invalid address", pvo); 1428 1429 if ((uintptr_t)(pvo->pvo_pmap+1) >= SEGMENT_LENGTH) { 1430 printf("pmap_pvo_check: pvo %p: invalid pmap address %p\n", 1431 pvo, pvo->pvo_pmap); 1432 failed = 1; 1433 } 1434 1435 if ((uintptr_t)TAILQ_NEXT(pvo, pvo_olink) >= SEGMENT_LENGTH || 1436 (((uintptr_t)TAILQ_NEXT(pvo, pvo_olink)) & 0x1f) != 0) { 1437 printf("pmap_pvo_check: pvo %p: invalid ovlink address %p\n", 1438 pvo, TAILQ_NEXT(pvo, pvo_olink)); 1439 failed = 1; 1440 } 1441 1442 if ((uintptr_t)LIST_NEXT(pvo, pvo_vlink) >= SEGMENT_LENGTH || 1443 (((uintptr_t)LIST_NEXT(pvo, pvo_vlink)) & 0x1f) != 0) { 1444 printf("pmap_pvo_check: pvo %p: invalid ovlink address %p\n", 1445 pvo, LIST_NEXT(pvo, pvo_vlink)); 1446 failed = 1; 1447 } 1448 1449 if (PVO_MANAGED_P(pvo)) { 1450 pvo_head = pa_to_pvoh(pvo->pvo_pte.pte_lo & PTE_RPGN, NULL); 1451 } else { 1452 if (pvo->pvo_vaddr < VM_MIN_KERNEL_ADDRESS) { 1453 printf("pmap_pvo_check: pvo %p: non kernel address " 1454 "on kernel unmanaged list\n", pvo); 1455 failed = 1; 1456 } 1457 pvo_head = &pmap_pvo_kunmanaged; 1458 } 1459 LIST_FOREACH(pvo0, pvo_head, pvo_vlink) { 1460 if (pvo0 == pvo) 1461 break; 1462 } 1463 if (pvo0 == NULL) { 1464 printf("pmap_pvo_check: pvo %p: not present " 1465 "on its vlist head %p\n", pvo, pvo_head); 1466 failed = 1; 1467 } 1468 if (pvo != pmap_pvo_find_va(pvo->pvo_pmap, pvo->pvo_vaddr, NULL)) { 1469 printf("pmap_pvo_check: pvo %p: not present " 1470 "on its olist head\n", pvo); 1471 failed = 1; 1472 } 1473 pt = pmap_pvo_to_pte(pvo, -1); 1474 if (pt == NULL) { 1475 if (pvo->pvo_pte.pte_hi & PTE_VALID) { 1476 printf("pmap_pvo_check: pvo %p: pte_hi VALID but " 1477 "no PTE\n", pvo); 1478 failed = 1; 1479 } 1480 } else { 1481 if ((uintptr_t) pt < (uintptr_t) &pmap_pteg_table[0] || 1482 (uintptr_t) pt >= 1483 (uintptr_t) &pmap_pteg_table[pmap_pteg_cnt]) { 1484 printf("pmap_pvo_check: pvo %p: pte %p not in " 1485 "pteg table\n", pvo, pt); 1486 failed = 1; 1487 } 1488 if (((((uintptr_t) pt) >> 3) & 7) != PVO_PTEGIDX_GET(pvo)) { 1489 printf("pmap_pvo_check: pvo %p: pte_hi VALID but " 1490 "no PTE\n", pvo); 1491 failed = 1; 1492 } 1493 if (pvo->pvo_pte.pte_hi != pt->pte_hi) { 1494 printf("pmap_pvo_check: pvo %p: pte_hi differ: " 1495 "%#" _PRIxpte "/%#" _PRIxpte "\n", pvo, 1496 pvo->pvo_pte.pte_hi, 1497 pt->pte_hi); 1498 failed = 1; 1499 } 1500 if (((pvo->pvo_pte.pte_lo ^ pt->pte_lo) & 1501 (PTE_PP|PTE_WIMG|PTE_RPGN)) != 0) { 1502 printf("pmap_pvo_check: pvo %p: pte_lo differ: " 1503 "%#" _PRIxpte "/%#" _PRIxpte "\n", pvo, 1504 (pvo->pvo_pte.pte_lo & (PTE_PP|PTE_WIMG|PTE_RPGN)), 1505 (pt->pte_lo & (PTE_PP|PTE_WIMG|PTE_RPGN))); 1506 failed = 1; 1507 } 1508 if ((pmap_pte_to_va(pt) ^ PVO_VADDR(pvo)) & 0x0fffffff) { 1509 printf("pmap_pvo_check: pvo %p: PTE %p derived VA %#" _PRIxva "" 1510 " doesn't not match PVO's VA %#" _PRIxva "\n", 1511 pvo, pt, pmap_pte_to_va(pt), PVO_VADDR(pvo)); 1512 failed = 1; 1513 } 1514 if (failed) 1515 pmap_pte_print(pt); 1516 } 1517 if (failed) 1518 panic("pmap_pvo_check: pvo %p, pm %p: bugcheck!", pvo, 1519 pvo->pvo_pmap); 1520 1521 PMAP_UNLOCK(); 1522 } 1523 #endif /* DEBUG || PMAPCHECK */ 1524 1525 /* 1526 * Search the PVO table 
looking for a non-wired entry.
 * If we find one, remove it and return it.
 */

struct pvo_entry *
pmap_pvo_reclaim(struct pmap *pm)
{
	struct pvo_tqhead *pvoh;
	struct pvo_entry *pvo;
	uint32_t idx, endidx;

	endidx = pmap_pvo_reclaim_nextidx;
	for (idx = (endidx + 1) & pmap_pteg_mask; idx != endidx;
	     idx = (idx + 1) & pmap_pteg_mask) {
		pvoh = &pmap_pvo_table[idx];
		TAILQ_FOREACH(pvo, pvoh, pvo_olink) {
			if (!PVO_WIRED_P(pvo)) {
				pmap_pvo_remove(pvo, -1, NULL);
				pmap_pvo_reclaim_nextidx = idx;
				PMAPCOUNT(pvos_reclaimed);
				return pvo;
			}
		}
	}
	return NULL;
}

/*
 * Returns 0 on success, or ENOMEM if a new PVO entry could not be
 * allocated.
 */
int
pmap_pvo_enter(pmap_t pm, struct pool *pl, struct pvo_head *pvo_head,
	vaddr_t va, paddr_t pa, register_t pte_lo, int flags)
{
	struct pvo_entry *pvo;
	struct pvo_tqhead *pvoh;
	register_t msr;
	int ptegidx;
	int i;
	int poolflags = PR_NOWAIT;

	/*
	 * Compute the PTE Group index.
	 */
	va &= ~ADDR_POFF;
	ptegidx = va_to_pteg(pm, va);

	msr = pmap_interrupts_off();

#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
	if (pmap_pvo_remove_depth > 0)
		panic("pmap_pvo_enter: called while pmap_pvo_remove active!");
	if (++pmap_pvo_enter_depth > 1)
		panic("pmap_pvo_enter: called recursively!");
#endif

	/*
	 * Remove any existing mapping for this page.  Reuse the
	 * pvo entry if there is a mapping.
	 */
	TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
#ifdef DEBUG
			if ((pmapdebug & PMAPDEBUG_PVOENTER) &&
			    ((pvo->pvo_pte.pte_lo ^ (pa|pte_lo)) &
			    ~(PTE_REF|PTE_CHG)) == 0 &&
			    va < VM_MIN_KERNEL_ADDRESS) {
				printf("pmap_pvo_enter: pvo %p: dup %#" _PRIxpte "/%#" _PRIxpa "\n",
				    pvo, pvo->pvo_pte.pte_lo, pte_lo|pa);
				printf("pmap_pvo_enter: pte_hi=%#" _PRIxpte " sr=%#" _PRIsr "\n",
				    pvo->pvo_pte.pte_hi,
				    pm->pm_sr[va >> ADDR_SR_SHFT]);
				pmap_pte_print(pmap_pvo_to_pte(pvo, -1));
#ifdef DDBX
				Debugger();
#endif
			}
#endif
			PMAPCOUNT(mappings_replaced);
			pmap_pvo_remove(pvo, -1, NULL);
			break;
		}
	}

	/*
	 * If we aren't overwriting a mapping, try to allocate.
	 */
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
	--pmap_pvo_enter_depth;
#endif
	pmap_interrupts_restore(msr);
	if (pvo) {
		pmap_pvo_free(pvo);
	}
	pvo = pool_get(pl, poolflags);
	KASSERT((vaddr_t)pvo < VM_MIN_KERNEL_ADDRESS);

#ifdef DEBUG
	/*
	 * Exercise pmap_pvo_reclaim() a little.
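	 * Once the counter passes 0x1000, roughly one allocation in 256
	 * is deliberately discarded (only when the caller allows
	 * PMAP_CANFAIL) so that the reclaim path below gets exercised.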
1626 */ 1627 if (pvo && (flags & PMAP_CANFAIL) != 0 && 1628 pmap_pvo_reclaim_debugctr++ > 0x1000 && 1629 (pmap_pvo_reclaim_debugctr & 0xff) == 0) { 1630 pool_put(pl, pvo); 1631 pvo = NULL; 1632 } 1633 #endif 1634 1635 msr = pmap_interrupts_off(); 1636 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 1637 ++pmap_pvo_enter_depth; 1638 #endif 1639 if (pvo == NULL) { 1640 pvo = pmap_pvo_reclaim(pm); 1641 if (pvo == NULL) { 1642 if ((flags & PMAP_CANFAIL) == 0) 1643 panic("pmap_pvo_enter: failed"); 1644 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 1645 pmap_pvo_enter_depth--; 1646 #endif 1647 PMAPCOUNT(pvos_failed); 1648 pmap_interrupts_restore(msr); 1649 return ENOMEM; 1650 } 1651 } 1652 1653 pvo->pvo_vaddr = va; 1654 pvo->pvo_pmap = pm; 1655 pvo->pvo_vaddr &= ~ADDR_POFF; 1656 if (flags & VM_PROT_EXECUTE) { 1657 PMAPCOUNT(exec_mappings); 1658 pvo_set_exec(pvo); 1659 } 1660 if (flags & PMAP_WIRED) 1661 pvo->pvo_vaddr |= PVO_WIRED; 1662 if (pvo_head != &pmap_pvo_kunmanaged) { 1663 pvo->pvo_vaddr |= PVO_MANAGED; 1664 PMAPCOUNT(mappings); 1665 } else { 1666 PMAPCOUNT(kernel_mappings); 1667 } 1668 pmap_pte_create(&pvo->pvo_pte, pm, va, pa | pte_lo); 1669 1670 LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink); 1671 if (PVO_WIRED_P(pvo)) 1672 pvo->pvo_pmap->pm_stats.wired_count++; 1673 pvo->pvo_pmap->pm_stats.resident_count++; 1674 #if defined(DEBUG) 1675 /* if (pm != pmap_kernel() && va < VM_MIN_KERNEL_ADDRESS) */ 1676 DPRINTFN(PVOENTER, 1677 "pmap_pvo_enter: pvo %p: pm %p va %#" _PRIxva " pa %#" _PRIxpa "\n", 1678 pvo, pm, va, pa); 1679 #endif 1680 1681 /* 1682 * We hope this succeeds but it isn't required. 1683 */ 1684 pvoh = &pmap_pvo_table[ptegidx]; 1685 i = pmap_pte_insert(ptegidx, &pvo->pvo_pte); 1686 if (i >= 0) { 1687 PVO_PTEGIDX_SET(pvo, i); 1688 PVO_WHERE(pvo, ENTER_INSERT); 1689 PMAPCOUNT2(((pvo->pvo_pte.pte_hi & PTE_HID) 1690 ? pmap_evcnt_ptes_secondary : pmap_evcnt_ptes_primary)[i]); 1691 TAILQ_INSERT_TAIL(pvoh, pvo, pvo_olink); 1692 1693 } else { 1694 /* 1695 * Since we didn't have room for this entry (which makes it 1696 * and evicted entry), place it at the head of the list. 1697 */ 1698 TAILQ_INSERT_HEAD(pvoh, pvo, pvo_olink); 1699 PMAPCOUNT(ptes_evicted); 1700 pm->pm_evictions++; 1701 /* 1702 * If this is a kernel page, make sure it's active. 1703 */ 1704 if (pm == pmap_kernel()) { 1705 i = pmap_pte_spill(pm, va, false); 1706 KASSERT(i); 1707 } 1708 } 1709 PMAP_PVO_CHECK(pvo); /* sanity check */ 1710 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 1711 pmap_pvo_enter_depth--; 1712 #endif 1713 pmap_interrupts_restore(msr); 1714 return 0; 1715 } 1716 1717 static void 1718 pmap_pvo_remove(struct pvo_entry *pvo, int pteidx, struct pvo_head *pvol) 1719 { 1720 volatile struct pte *pt; 1721 int ptegidx; 1722 1723 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 1724 if (++pmap_pvo_remove_depth > 1) 1725 panic("pmap_pvo_remove: called recursively!"); 1726 #endif 1727 1728 /* 1729 * If we haven't been supplied the ptegidx, calculate it. 1730 */ 1731 if (pteidx == -1) { 1732 ptegidx = va_to_pteg(pvo->pvo_pmap, pvo->pvo_vaddr); 1733 pteidx = pmap_pvo_pte_index(pvo, ptegidx); 1734 } else { 1735 ptegidx = pteidx >> 3; 1736 if (pvo->pvo_pte.pte_hi & PTE_HID) 1737 ptegidx ^= pmap_pteg_mask; 1738 } 1739 PMAP_PVO_CHECK(pvo); /* sanity check */ 1740 1741 /* 1742 * If there is an active pte entry, we need to deactivate it 1743 * (and save the ref & chg bits). 
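	 * If no PTE is resident, this PVO must have been evicted, so the
	 * owning pmap's eviction count is decremented instead.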
1744 */ 1745 pt = pmap_pvo_to_pte(pvo, pteidx); 1746 if (pt != NULL) { 1747 pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 1748 PVO_WHERE(pvo, REMOVE); 1749 PVO_PTEGIDX_CLR(pvo); 1750 PMAPCOUNT(ptes_removed); 1751 } else { 1752 KASSERT(pvo->pvo_pmap->pm_evictions > 0); 1753 pvo->pvo_pmap->pm_evictions--; 1754 } 1755 1756 /* 1757 * Account for executable mappings. 1758 */ 1759 if (PVO_EXECUTABLE_P(pvo)) 1760 pvo_clear_exec(pvo); 1761 1762 /* 1763 * Update our statistics. 1764 */ 1765 pvo->pvo_pmap->pm_stats.resident_count--; 1766 if (PVO_WIRED_P(pvo)) 1767 pvo->pvo_pmap->pm_stats.wired_count--; 1768 1769 /* 1770 * Save the REF/CHG bits into their cache if the page is managed. 1771 */ 1772 if (PVO_MANAGED_P(pvo)) { 1773 register_t ptelo = pvo->pvo_pte.pte_lo; 1774 struct vm_page *pg = PHYS_TO_VM_PAGE(ptelo & PTE_RPGN); 1775 1776 if (pg != NULL) { 1777 /* 1778 * If this page was changed and it is mapped exec, 1779 * invalidate it. 1780 */ 1781 if ((ptelo & PTE_CHG) && 1782 (pmap_attr_fetch(pg) & PTE_EXEC)) { 1783 struct pvo_head *pvoh = vm_page_to_pvoh(pg); 1784 if (LIST_EMPTY(pvoh)) { 1785 DPRINTFN(EXEC, "[pmap_pvo_remove: " 1786 "%#" _PRIxpa ": clear-exec]\n", 1787 VM_PAGE_TO_PHYS(pg)); 1788 pmap_attr_clear(pg, PTE_EXEC); 1789 PMAPCOUNT(exec_uncached_pvo_remove); 1790 } else { 1791 DPRINTFN(EXEC, "[pmap_pvo_remove: " 1792 "%#" _PRIxpa ": syncicache]\n", 1793 VM_PAGE_TO_PHYS(pg)); 1794 pmap_syncicache(VM_PAGE_TO_PHYS(pg), 1795 PAGE_SIZE); 1796 PMAPCOUNT(exec_synced_pvo_remove); 1797 } 1798 } 1799 1800 pmap_attr_save(pg, ptelo & (PTE_REF|PTE_CHG)); 1801 } 1802 PMAPCOUNT(unmappings); 1803 } else { 1804 PMAPCOUNT(kernel_unmappings); 1805 } 1806 1807 /* 1808 * Remove the PVO from its lists and return it to the pool. 1809 */ 1810 LIST_REMOVE(pvo, pvo_vlink); 1811 TAILQ_REMOVE(&pmap_pvo_table[ptegidx], pvo, pvo_olink); 1812 if (pvol) { 1813 LIST_INSERT_HEAD(pvol, pvo, pvo_vlink); 1814 } 1815 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 1816 pmap_pvo_remove_depth--; 1817 #endif 1818 } 1819 1820 void 1821 pmap_pvo_free(struct pvo_entry *pvo) 1822 { 1823 1824 pool_put(PVO_MANAGED_P(pvo) ? &pmap_mpvo_pool : &pmap_upvo_pool, pvo); 1825 } 1826 1827 void 1828 pmap_pvo_free_list(struct pvo_head *pvol) 1829 { 1830 struct pvo_entry *pvo, *npvo; 1831 1832 for (pvo = LIST_FIRST(pvol); pvo != NULL; pvo = npvo) { 1833 npvo = LIST_NEXT(pvo, pvo_vlink); 1834 LIST_REMOVE(pvo, pvo_vlink); 1835 pmap_pvo_free(pvo); 1836 } 1837 } 1838 1839 /* 1840 * Mark a mapping as executable. 1841 * If this is the first executable mapping in the segment, 1842 * clear the noexec flag. 1843 */ 1844 static void 1845 pvo_set_exec(struct pvo_entry *pvo) 1846 { 1847 struct pmap *pm = pvo->pvo_pmap; 1848 1849 if (pm == pmap_kernel() || PVO_EXECUTABLE_P(pvo)) { 1850 return; 1851 } 1852 pvo->pvo_vaddr |= PVO_EXECUTABLE; 1853 #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE) 1854 { 1855 int sr = PVO_VADDR(pvo) >> ADDR_SR_SHFT; 1856 if (pm->pm_exec[sr]++ == 0) { 1857 pm->pm_sr[sr] &= ~SR_NOEXEC; 1858 } 1859 } 1860 #endif 1861 } 1862 1863 /* 1864 * Mark a mapping as non-executable. 1865 * If this was the last executable mapping in the segment, 1866 * set the noexec flag. 
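 * Each user segment keeps a count of its executable mappings in
 * pm_exec[]; SR_NOEXEC is set in the segment register only while that
 * count is zero.  For the kernel pmap the segment registers are left
 * untouched.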
1867 */ 1868 static void 1869 pvo_clear_exec(struct pvo_entry *pvo) 1870 { 1871 struct pmap *pm = pvo->pvo_pmap; 1872 1873 if (pm == pmap_kernel() || !PVO_EXECUTABLE_P(pvo)) { 1874 return; 1875 } 1876 pvo->pvo_vaddr &= ~PVO_EXECUTABLE; 1877 #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE) 1878 { 1879 int sr = PVO_VADDR(pvo) >> ADDR_SR_SHFT; 1880 if (--pm->pm_exec[sr] == 0) { 1881 pm->pm_sr[sr] |= SR_NOEXEC; 1882 } 1883 } 1884 #endif 1885 } 1886 1887 /* 1888 * Insert physical page at pa into the given pmap at virtual address va. 1889 */ 1890 int 1891 pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) 1892 { 1893 struct mem_region *mp; 1894 struct pvo_head *pvo_head; 1895 struct vm_page *pg; 1896 struct pool *pl; 1897 register_t pte_lo; 1898 int error; 1899 u_int was_exec = 0; 1900 1901 PMAP_LOCK(); 1902 1903 if (__predict_false(!pmap_initialized)) { 1904 pvo_head = &pmap_pvo_kunmanaged; 1905 pl = &pmap_upvo_pool; 1906 pg = NULL; 1907 was_exec = PTE_EXEC; 1908 } else { 1909 pvo_head = pa_to_pvoh(pa, &pg); 1910 pl = &pmap_mpvo_pool; 1911 } 1912 1913 DPRINTFN(ENTER, 1914 "pmap_enter(%p, %#" _PRIxva ", %#" _PRIxpa ", 0x%x, 0x%x):", 1915 pm, va, pa, prot, flags); 1916 1917 /* 1918 * If this is a managed page, and it's the first reference to the 1919 * page clear the execness of the page. Otherwise fetch the execness. 1920 */ 1921 if (pg != NULL) 1922 was_exec = pmap_attr_fetch(pg) & PTE_EXEC; 1923 1924 DPRINTFN(ENTER, " was_exec=%d", was_exec); 1925 1926 /* 1927 * Assume the page is cache inhibited and access is guarded unless 1928 * it's in our available memory array. If it is in the memory array, 1929 * asssume it's in memory coherent memory. 1930 */ 1931 if (flags & PMAP_MD_PREFETCHABLE) { 1932 pte_lo = 0; 1933 } else 1934 pte_lo = PTE_G; 1935 1936 if ((flags & PMAP_NOCACHE) == 0) { 1937 for (mp = mem; mp->size; mp++) { 1938 if (pa >= mp->start && pa < mp->start + mp->size) { 1939 pte_lo = PTE_M; 1940 break; 1941 } 1942 } 1943 #ifdef MULTIPROCESSOR 1944 if (((mfpvr() >> 16) & 0xffff) == MPC603e) 1945 pte_lo = PTE_M; 1946 #endif 1947 } else { 1948 pte_lo |= PTE_I; 1949 } 1950 1951 if (prot & VM_PROT_WRITE) 1952 pte_lo |= PTE_BW; 1953 else 1954 pte_lo |= PTE_BR; 1955 1956 /* 1957 * If this was in response to a fault, "pre-fault" the PTE's 1958 * changed/referenced bit appropriately. 1959 */ 1960 if (flags & VM_PROT_WRITE) 1961 pte_lo |= PTE_CHG; 1962 if (flags & VM_PROT_ALL) 1963 pte_lo |= PTE_REF; 1964 1965 /* 1966 * We need to know if this page can be executable 1967 */ 1968 flags |= (prot & VM_PROT_EXECUTE); 1969 1970 /* 1971 * Record mapping for later back-translation and pte spilling. 1972 * This will overwrite any existing mapping. 1973 */ 1974 error = pmap_pvo_enter(pm, pl, pvo_head, va, pa, pte_lo, flags); 1975 1976 /* 1977 * Flush the real page from the instruction cache if this page is 1978 * mapped executable and cacheable and has not been flushed since 1979 * the last time it was modified. 
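 * "Has not been flushed since the last time it was modified" is
 * approximated by the cached PTE_EXEC page attribute: was_exec is non-zero
 * when the page was already marked executable, in which case the icache is
 * assumed to still be coherent and the syncicache below is skipped;
 * otherwise the page is flushed and (if managed) marked PTE_EXEC so later
 * executable mappings can skip the flush as well.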
1980 */ 1981 if (error == 0 && 1982 (flags & VM_PROT_EXECUTE) && 1983 (pte_lo & PTE_I) == 0 && 1984 was_exec == 0) { 1985 DPRINTFN(ENTER, " %s", "syncicache"); 1986 PMAPCOUNT(exec_synced); 1987 pmap_syncicache(pa, PAGE_SIZE); 1988 if (pg != NULL) { 1989 pmap_attr_save(pg, PTE_EXEC); 1990 PMAPCOUNT(exec_cached); 1991 #if defined(DEBUG) || defined(PMAPDEBUG) 1992 if (pmapdebug & PMAPDEBUG_ENTER) 1993 printf(" marked-as-exec"); 1994 else if (pmapdebug & PMAPDEBUG_EXEC) 1995 printf("[pmap_enter: %#" _PRIxpa ": marked-as-exec]\n", 1996 VM_PAGE_TO_PHYS(pg)); 1997 1998 #endif 1999 } 2000 } 2001 2002 DPRINTFN(ENTER, ": error=%d\n", error); 2003 2004 PMAP_UNLOCK(); 2005 2006 return error; 2007 } 2008 2009 void 2010 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) 2011 { 2012 struct mem_region *mp; 2013 register_t pte_lo; 2014 int error; 2015 2016 #if defined (PMAP_OEA64_BRIDGE) || defined (PMAP_OEA) 2017 if (va < VM_MIN_KERNEL_ADDRESS) 2018 panic("pmap_kenter_pa: attempt to enter " 2019 "non-kernel address %#" _PRIxva "!", va); 2020 #endif 2021 2022 DPRINTFN(KENTER, 2023 "pmap_kenter_pa(%#" _PRIxva ",%#" _PRIxpa ",%#x)\n", va, pa, prot); 2024 2025 PMAP_LOCK(); 2026 2027 /* 2028 * Assume the page is cache inhibited and access is guarded unless 2029 * it's in our available memory array. If it is in the memory array, 2030 * asssume it's in memory coherent memory. 2031 */ 2032 pte_lo = PTE_IG; 2033 if ((flags & PMAP_NOCACHE) == 0) { 2034 for (mp = mem; mp->size; mp++) { 2035 if (pa >= mp->start && pa < mp->start + mp->size) { 2036 pte_lo = PTE_M; 2037 break; 2038 } 2039 } 2040 #ifdef MULTIPROCESSOR 2041 if (((mfpvr() >> 16) & 0xffff) == MPC603e) 2042 pte_lo = PTE_M; 2043 #endif 2044 } 2045 2046 if (prot & VM_PROT_WRITE) 2047 pte_lo |= PTE_BW; 2048 else 2049 pte_lo |= PTE_BR; 2050 2051 /* 2052 * We don't care about REF/CHG on PVOs on the unmanaged list. 2053 */ 2054 error = pmap_pvo_enter(pmap_kernel(), &pmap_upvo_pool, 2055 &pmap_pvo_kunmanaged, va, pa, pte_lo, prot|PMAP_WIRED); 2056 2057 if (error != 0) 2058 panic("pmap_kenter_pa: failed to enter va %#" _PRIxva " pa %#" _PRIxpa ": %d", 2059 va, pa, error); 2060 2061 PMAP_UNLOCK(); 2062 } 2063 2064 void 2065 pmap_kremove(vaddr_t va, vsize_t len) 2066 { 2067 if (va < VM_MIN_KERNEL_ADDRESS) 2068 panic("pmap_kremove: attempt to remove " 2069 "non-kernel address %#" _PRIxva "!", va); 2070 2071 DPRINTFN(KREMOVE, "pmap_kremove(%#" _PRIxva ",%#" _PRIxva ")\n", va, len); 2072 pmap_remove(pmap_kernel(), va, va + len); 2073 } 2074 2075 /* 2076 * Remove the given range of mapping entries. 2077 */ 2078 void 2079 pmap_remove(pmap_t pm, vaddr_t va, vaddr_t endva) 2080 { 2081 struct pvo_head pvol; 2082 struct pvo_entry *pvo; 2083 register_t msr; 2084 int pteidx; 2085 2086 PMAP_LOCK(); 2087 LIST_INIT(&pvol); 2088 msr = pmap_interrupts_off(); 2089 for (; va < endva; va += PAGE_SIZE) { 2090 pvo = pmap_pvo_find_va(pm, va, &pteidx); 2091 if (pvo != NULL) { 2092 pmap_pvo_remove(pvo, pteidx, &pvol); 2093 } 2094 } 2095 pmap_interrupts_restore(msr); 2096 pmap_pvo_free_list(&pvol); 2097 PMAP_UNLOCK(); 2098 } 2099 2100 /* 2101 * Get the physical page address for the given pmap/virtual address. 2102 */ 2103 bool 2104 pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap) 2105 { 2106 struct pvo_entry *pvo; 2107 register_t msr; 2108 2109 PMAP_LOCK(); 2110 2111 /* 2112 * If this is a kernel pmap lookup, also check the battable 2113 * and if we get a hit, translate the VA to a PA using the 2114 * BAT entries. 
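 * A BAT hit is translated by hand: the block-size bits of the BAT pair are
 * turned into an address mask, the physical base comes from the lower BAT
 * word and the offset bits come from va itself, i.e. roughly
 * pa = (batl & mask) | (va & ~mask).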
Don't check for VM_MAX_KERNEL_ADDRESS is 2115 * that will wrap back to 0. 2116 */ 2117 if (pm == pmap_kernel() && 2118 (va < VM_MIN_KERNEL_ADDRESS || 2119 (KERNEL2_SR < 15 && VM_MAX_KERNEL_ADDRESS <= va))) { 2120 KASSERT((va >> ADDR_SR_SHFT) != USER_SR); 2121 #if defined (PMAP_OEA) 2122 #ifdef PPC_OEA601 2123 if ((MFPVR() >> 16) == MPC601) { 2124 register_t batu = battable[va >> 23].batu; 2125 register_t batl = battable[va >> 23].batl; 2126 register_t sr = iosrtable[va >> ADDR_SR_SHFT]; 2127 if (BAT601_VALID_P(batl) && 2128 BAT601_VA_MATCH_P(batu, batl, va)) { 2129 register_t mask = 2130 (~(batl & BAT601_BSM) << 17) & ~0x1ffffL; 2131 if (pap) 2132 *pap = (batl & mask) | (va & ~mask); 2133 PMAP_UNLOCK(); 2134 return true; 2135 } else if (SR601_VALID_P(sr) && 2136 SR601_PA_MATCH_P(sr, va)) { 2137 if (pap) 2138 *pap = va; 2139 PMAP_UNLOCK(); 2140 return true; 2141 } 2142 } else 2143 #endif /* PPC_OEA601 */ 2144 { 2145 register_t batu = battable[BAT_VA2IDX(va)].batu; 2146 if (BAT_VALID_P(batu,0) && BAT_VA_MATCH_P(batu,va)) { 2147 register_t batl = battable[BAT_VA2IDX(va)].batl; 2148 register_t mask = 2149 (~(batu & (BAT_XBL|BAT_BL)) << 15) & ~0x1ffffL; 2150 if (pap) 2151 *pap = (batl & mask) | (va & ~mask); 2152 PMAP_UNLOCK(); 2153 return true; 2154 } 2155 } 2156 return false; 2157 #elif defined (PMAP_OEA64_BRIDGE) 2158 if (va >= SEGMENT_LENGTH) 2159 panic("%s: pm: %s va >= SEGMENT_LENGTH, va: 0x%08lx\n", 2160 __func__, (pm == pmap_kernel() ? "kernel" : "user"), va); 2161 else { 2162 if (pap) 2163 *pap = va; 2164 PMAP_UNLOCK(); 2165 return true; 2166 } 2167 #elif defined (PMAP_OEA64) 2168 #error PPC_OEA64 not supported 2169 #endif /* PPC_OEA */ 2170 } 2171 2172 msr = pmap_interrupts_off(); 2173 pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL); 2174 if (pvo != NULL) { 2175 PMAP_PVO_CHECK(pvo); /* sanity check */ 2176 if (pap) 2177 *pap = (pvo->pvo_pte.pte_lo & PTE_RPGN) 2178 | (va & ADDR_POFF); 2179 } 2180 pmap_interrupts_restore(msr); 2181 PMAP_UNLOCK(); 2182 return pvo != NULL; 2183 } 2184 2185 /* 2186 * Lower the protection on the specified range of this pmap. 2187 */ 2188 void 2189 pmap_protect(pmap_t pm, vaddr_t va, vaddr_t endva, vm_prot_t prot) 2190 { 2191 struct pvo_entry *pvo; 2192 volatile struct pte *pt; 2193 register_t msr; 2194 int pteidx; 2195 2196 /* 2197 * Since this routine only downgrades protection, we should 2198 * always be called with at least one bit not set. 2199 */ 2200 KASSERT(prot != VM_PROT_ALL); 2201 2202 /* 2203 * If there is no protection, this is equivalent to 2204 * remove the pmap from the pmap. 2205 */ 2206 if ((prot & VM_PROT_READ) == 0) { 2207 pmap_remove(pm, va, endva); 2208 return; 2209 } 2210 2211 PMAP_LOCK(); 2212 2213 msr = pmap_interrupts_off(); 2214 for (; va < endva; va += PAGE_SIZE) { 2215 pvo = pmap_pvo_find_va(pm, va, &pteidx); 2216 if (pvo == NULL) 2217 continue; 2218 PMAP_PVO_CHECK(pvo); /* sanity check */ 2219 2220 /* 2221 * Revoke executable if asked to do so. 2222 */ 2223 if ((prot & VM_PROT_EXECUTE) == 0) 2224 pvo_clear_exec(pvo); 2225 2226 #if 0 2227 /* 2228 * If the page is already read-only, no change 2229 * needs to be made. 2230 */ 2231 if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) 2232 continue; 2233 #endif 2234 /* 2235 * Grab the PTE pointer before we diddle with 2236 * the cached PTE copy. 2237 */ 2238 pt = pmap_pvo_to_pte(pvo, pteidx); 2239 /* 2240 * Change the protection of the page. 
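 * Only a downgrade to read-only can happen here: PTE_PP is forced to
 * PTE_BR in the cached copy, and if a real hash-table slot currently backs
 * this PVO the change is pushed out with pmap_pte_change() so the MMU sees
 * the new protection immediately.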
2241 */ 2242 pvo->pvo_pte.pte_lo &= ~PTE_PP; 2243 pvo->pvo_pte.pte_lo |= PTE_BR; 2244 2245 /* 2246 * If the PVO is in the page table, update 2247 * that pte at well. 2248 */ 2249 if (pt != NULL) { 2250 pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 2251 PVO_WHERE(pvo, PMAP_PROTECT); 2252 PMAPCOUNT(ptes_changed); 2253 } 2254 2255 PMAP_PVO_CHECK(pvo); /* sanity check */ 2256 } 2257 pmap_interrupts_restore(msr); 2258 PMAP_UNLOCK(); 2259 } 2260 2261 void 2262 pmap_unwire(pmap_t pm, vaddr_t va) 2263 { 2264 struct pvo_entry *pvo; 2265 register_t msr; 2266 2267 PMAP_LOCK(); 2268 msr = pmap_interrupts_off(); 2269 pvo = pmap_pvo_find_va(pm, va, NULL); 2270 if (pvo != NULL) { 2271 if (PVO_WIRED_P(pvo)) { 2272 pvo->pvo_vaddr &= ~PVO_WIRED; 2273 pm->pm_stats.wired_count--; 2274 } 2275 PMAP_PVO_CHECK(pvo); /* sanity check */ 2276 } 2277 pmap_interrupts_restore(msr); 2278 PMAP_UNLOCK(); 2279 } 2280 2281 /* 2282 * Lower the protection on the specified physical page. 2283 */ 2284 void 2285 pmap_page_protect(struct vm_page *pg, vm_prot_t prot) 2286 { 2287 struct pvo_head *pvo_head, pvol; 2288 struct pvo_entry *pvo, *next_pvo; 2289 volatile struct pte *pt; 2290 register_t msr; 2291 2292 PMAP_LOCK(); 2293 2294 KASSERT(prot != VM_PROT_ALL); 2295 LIST_INIT(&pvol); 2296 msr = pmap_interrupts_off(); 2297 2298 /* 2299 * When UVM reuses a page, it does a pmap_page_protect with 2300 * VM_PROT_NONE. At that point, we can clear the exec flag 2301 * since we know the page will have different contents. 2302 */ 2303 if ((prot & VM_PROT_READ) == 0) { 2304 DPRINTFN(EXEC, "[pmap_page_protect: %#" _PRIxpa ": clear-exec]\n", 2305 VM_PAGE_TO_PHYS(pg)); 2306 if (pmap_attr_fetch(pg) & PTE_EXEC) { 2307 PMAPCOUNT(exec_uncached_page_protect); 2308 pmap_attr_clear(pg, PTE_EXEC); 2309 } 2310 } 2311 2312 pvo_head = vm_page_to_pvoh(pg); 2313 for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) { 2314 next_pvo = LIST_NEXT(pvo, pvo_vlink); 2315 PMAP_PVO_CHECK(pvo); /* sanity check */ 2316 2317 /* 2318 * Downgrading to no mapping at all, we just remove the entry. 2319 */ 2320 if ((prot & VM_PROT_READ) == 0) { 2321 pmap_pvo_remove(pvo, -1, &pvol); 2322 continue; 2323 } 2324 2325 /* 2326 * If EXEC permission is being revoked, just clear the 2327 * flag in the PVO. 2328 */ 2329 if ((prot & VM_PROT_EXECUTE) == 0) 2330 pvo_clear_exec(pvo); 2331 2332 /* 2333 * If this entry is already RO, don't diddle with the 2334 * page table. 2335 */ 2336 if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) { 2337 PMAP_PVO_CHECK(pvo); 2338 continue; 2339 } 2340 2341 /* 2342 * Grab the PTE before the we diddle the bits so 2343 * pvo_to_pte can verify the pte contents are as 2344 * expected. 2345 */ 2346 pt = pmap_pvo_to_pte(pvo, -1); 2347 pvo->pvo_pte.pte_lo &= ~PTE_PP; 2348 pvo->pvo_pte.pte_lo |= PTE_BR; 2349 if (pt != NULL) { 2350 pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 2351 PVO_WHERE(pvo, PMAP_PAGE_PROTECT); 2352 PMAPCOUNT(ptes_changed); 2353 } 2354 PMAP_PVO_CHECK(pvo); /* sanity check */ 2355 } 2356 pmap_interrupts_restore(msr); 2357 pmap_pvo_free_list(&pvol); 2358 2359 PMAP_UNLOCK(); 2360 } 2361 2362 /* 2363 * Activate the address space for the specified process. If the process 2364 * is the current process, load the new MMU context. 2365 */ 2366 void 2367 pmap_activate(struct lwp *l) 2368 { 2369 struct pcb *pcb = lwp_getpcb(l); 2370 pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap; 2371 2372 DPRINTFN(ACTIVATE, 2373 "pmap_activate: lwp %p (curlwp %p)\n", l, curlwp); 2374 2375 /* 2376 * XXX Normally performed in cpu_lwp_fork(). 
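 * (pcb_pm is normally already set up when the lwp is created; it is
 * re-recorded here so the pcb stays correct even when the process has
 * since switched to a new vmspace, e.g. across exec.)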
2377 */ 2378 pcb->pcb_pm = pmap; 2379 2380 /* 2381 * In theory, the SR registers need only be valid on return 2382 * to user space wait to do them there. 2383 */ 2384 if (l == curlwp) { 2385 /* Store pointer to new current pmap. */ 2386 curpm = pmap; 2387 } 2388 } 2389 2390 /* 2391 * Deactivate the specified process's address space. 2392 */ 2393 void 2394 pmap_deactivate(struct lwp *l) 2395 { 2396 } 2397 2398 bool 2399 pmap_query_bit(struct vm_page *pg, int ptebit) 2400 { 2401 struct pvo_entry *pvo; 2402 volatile struct pte *pt; 2403 register_t msr; 2404 2405 PMAP_LOCK(); 2406 2407 if (pmap_attr_fetch(pg) & ptebit) { 2408 PMAP_UNLOCK(); 2409 return true; 2410 } 2411 2412 msr = pmap_interrupts_off(); 2413 LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) { 2414 PMAP_PVO_CHECK(pvo); /* sanity check */ 2415 /* 2416 * See if we saved the bit off. If so cache, it and return 2417 * success. 2418 */ 2419 if (pvo->pvo_pte.pte_lo & ptebit) { 2420 pmap_attr_save(pg, ptebit); 2421 PMAP_PVO_CHECK(pvo); /* sanity check */ 2422 pmap_interrupts_restore(msr); 2423 PMAP_UNLOCK(); 2424 return true; 2425 } 2426 } 2427 /* 2428 * No luck, now go thru the hard part of looking at the ptes 2429 * themselves. Sync so any pending REF/CHG bits are flushed 2430 * to the PTEs. 2431 */ 2432 SYNC(); 2433 LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) { 2434 PMAP_PVO_CHECK(pvo); /* sanity check */ 2435 /* 2436 * See if this pvo have a valid PTE. If so, fetch the 2437 * REF/CHG bits from the valid PTE. If the appropriate 2438 * ptebit is set, cache, it and return success. 2439 */ 2440 pt = pmap_pvo_to_pte(pvo, -1); 2441 if (pt != NULL) { 2442 pmap_pte_synch(pt, &pvo->pvo_pte); 2443 if (pvo->pvo_pte.pte_lo & ptebit) { 2444 pmap_attr_save(pg, ptebit); 2445 PMAP_PVO_CHECK(pvo); /* sanity check */ 2446 pmap_interrupts_restore(msr); 2447 PMAP_UNLOCK(); 2448 return true; 2449 } 2450 } 2451 } 2452 pmap_interrupts_restore(msr); 2453 PMAP_UNLOCK(); 2454 return false; 2455 } 2456 2457 bool 2458 pmap_clear_bit(struct vm_page *pg, int ptebit) 2459 { 2460 struct pvo_head *pvoh = vm_page_to_pvoh(pg); 2461 struct pvo_entry *pvo; 2462 volatile struct pte *pt; 2463 register_t msr; 2464 int rv = 0; 2465 2466 PMAP_LOCK(); 2467 msr = pmap_interrupts_off(); 2468 2469 /* 2470 * Fetch the cache value 2471 */ 2472 rv |= pmap_attr_fetch(pg); 2473 2474 /* 2475 * Clear the cached value. 2476 */ 2477 pmap_attr_clear(pg, ptebit); 2478 2479 /* 2480 * Sync so any pending REF/CHG bits are flushed to the PTEs (so we 2481 * can reset the right ones). Note that since the pvo entries and 2482 * list heads are accessed via BAT0 and are never placed in the 2483 * page table, we don't have to worry about further accesses setting 2484 * the REF/CHG bits. 2485 */ 2486 SYNC(); 2487 2488 /* 2489 * For each pvo entry, clear pvo's ptebit. If this pvo have a 2490 * valid PTE. If so, clear the ptebit from the valid PTE. 2491 */ 2492 LIST_FOREACH(pvo, pvoh, pvo_vlink) { 2493 PMAP_PVO_CHECK(pvo); /* sanity check */ 2494 pt = pmap_pvo_to_pte(pvo, -1); 2495 if (pt != NULL) { 2496 /* 2497 * Only sync the PTE if the bit we are looking 2498 * for is not already set. 2499 */ 2500 if ((pvo->pvo_pte.pte_lo & ptebit) == 0) 2501 pmap_pte_synch(pt, &pvo->pvo_pte); 2502 /* 2503 * If the bit we are looking for was already set, 2504 * clear that bit in the pte. 
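 * The ordering keeps page-table traffic to a minimum: the cached copy is
 * only re-synced from the hardware PTE when the bit is not already known
 * to be set, and the hardware PTE is only written when there is actually
 * something to clear.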
2505 */ 2506 if (pvo->pvo_pte.pte_lo & ptebit) 2507 pmap_pte_clear(pt, PVO_VADDR(pvo), ptebit); 2508 } 2509 rv |= pvo->pvo_pte.pte_lo & (PTE_CHG|PTE_REF); 2510 pvo->pvo_pte.pte_lo &= ~ptebit; 2511 PMAP_PVO_CHECK(pvo); /* sanity check */ 2512 } 2513 pmap_interrupts_restore(msr); 2514 2515 /* 2516 * If we are clearing the modify bit and this page was marked EXEC 2517 * and the user of the page thinks the page was modified, then we 2518 * need to clean it from the icache if it's mapped or clear the EXEC 2519 * bit if it's not mapped. The page itself might not have the CHG 2520 * bit set if the modification was done via DMA to the page. 2521 */ 2522 if ((ptebit & PTE_CHG) && (rv & PTE_EXEC)) { 2523 if (LIST_EMPTY(pvoh)) { 2524 DPRINTFN(EXEC, "[pmap_clear_bit: %#" _PRIxpa ": clear-exec]\n", 2525 VM_PAGE_TO_PHYS(pg)); 2526 pmap_attr_clear(pg, PTE_EXEC); 2527 PMAPCOUNT(exec_uncached_clear_modify); 2528 } else { 2529 DPRINTFN(EXEC, "[pmap_clear_bit: %#" _PRIxpa ": syncicache]\n", 2530 VM_PAGE_TO_PHYS(pg)); 2531 pmap_syncicache(VM_PAGE_TO_PHYS(pg), PAGE_SIZE); 2532 PMAPCOUNT(exec_synced_clear_modify); 2533 } 2534 } 2535 PMAP_UNLOCK(); 2536 return (rv & ptebit) != 0; 2537 } 2538 2539 void 2540 pmap_procwr(struct proc *p, vaddr_t va, size_t len) 2541 { 2542 struct pvo_entry *pvo; 2543 size_t offset = va & ADDR_POFF; 2544 int s; 2545 2546 PMAP_LOCK(); 2547 s = splvm(); 2548 while (len > 0) { 2549 size_t seglen = PAGE_SIZE - offset; 2550 if (seglen > len) 2551 seglen = len; 2552 pvo = pmap_pvo_find_va(p->p_vmspace->vm_map.pmap, va, NULL); 2553 if (pvo != NULL && PVO_EXECUTABLE_P(pvo)) { 2554 pmap_syncicache( 2555 (pvo->pvo_pte.pte_lo & PTE_RPGN) | offset, seglen); 2556 PMAP_PVO_CHECK(pvo); 2557 } 2558 va += seglen; 2559 len -= seglen; 2560 offset = 0; 2561 } 2562 splx(s); 2563 PMAP_UNLOCK(); 2564 } 2565 2566 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB) 2567 void 2568 pmap_pte_print(volatile struct pte *pt) 2569 { 2570 printf("PTE %p: ", pt); 2571 2572 #if defined(PMAP_OEA) 2573 /* High word: */ 2574 printf("%#" _PRIxpte ": [", pt->pte_hi); 2575 #else 2576 printf("%#" _PRIxpte ": [", pt->pte_hi); 2577 #endif /* PMAP_OEA */ 2578 2579 printf("%c ", (pt->pte_hi & PTE_VALID) ? 'v' : 'i'); 2580 printf("%c ", (pt->pte_hi & PTE_HID) ? 'h' : '-'); 2581 2582 printf("%#" _PRIxpte " %#" _PRIxpte "", 2583 (pt->pte_hi &~ PTE_VALID)>>PTE_VSID_SHFT, 2584 pt->pte_hi & PTE_API); 2585 #if defined(PMAP_OEA) || defined(PMAP_OEA64_BRIDGE) 2586 printf(" (va %#" _PRIxva ")] ", pmap_pte_to_va(pt)); 2587 #else 2588 printf(" (va %#" _PRIxva ")] ", pmap_pte_to_va(pt)); 2589 #endif /* PMAP_OEA */ 2590 2591 /* Low word: */ 2592 #if defined (PMAP_OEA) 2593 printf(" %#" _PRIxpte ": [", pt->pte_lo); 2594 printf("%#" _PRIxpte "... ", pt->pte_lo >> 12); 2595 #else 2596 printf(" %#" _PRIxpte ": [", pt->pte_lo); 2597 printf("%#" _PRIxpte "... ", pt->pte_lo >> 12); 2598 #endif 2599 printf("%c ", (pt->pte_lo & PTE_REF) ? 'r' : 'u'); 2600 printf("%c ", (pt->pte_lo & PTE_CHG) ? 'c' : 'n'); 2601 printf("%c", (pt->pte_lo & PTE_W) ? 'w' : '.'); 2602 printf("%c", (pt->pte_lo & PTE_I) ? 'i' : '.'); 2603 printf("%c", (pt->pte_lo & PTE_M) ? 'm' : '.'); 2604 printf("%c ", (pt->pte_lo & PTE_G) ? 
'g' : '.'); 2605 switch (pt->pte_lo & PTE_PP) { 2606 case PTE_BR: printf("br]\n"); break; 2607 case PTE_BW: printf("bw]\n"); break; 2608 case PTE_SO: printf("so]\n"); break; 2609 case PTE_SW: printf("sw]\n"); break; 2610 } 2611 } 2612 #endif 2613 2614 #if defined(DDB) 2615 void 2616 pmap_pteg_check(void) 2617 { 2618 volatile struct pte *pt; 2619 int i; 2620 int ptegidx; 2621 u_int p_valid = 0; 2622 u_int s_valid = 0; 2623 u_int invalid = 0; 2624 2625 for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) { 2626 for (pt = pmap_pteg_table[ptegidx].pt, i = 8; --i >= 0; pt++) { 2627 if (pt->pte_hi & PTE_VALID) { 2628 if (pt->pte_hi & PTE_HID) 2629 s_valid++; 2630 else 2631 { 2632 p_valid++; 2633 } 2634 } else 2635 invalid++; 2636 } 2637 } 2638 printf("pteg_check: v(p) %#x (%d), v(s) %#x (%d), i %#x (%d)\n", 2639 p_valid, p_valid, s_valid, s_valid, 2640 invalid, invalid); 2641 } 2642 2643 void 2644 pmap_print_mmuregs(void) 2645 { 2646 int i; 2647 #if defined (PMAP_OEA) || defined (PMAP_OEA_BRIDGE) 2648 u_int cpuvers; 2649 #endif 2650 #ifndef PMAP_OEA64 2651 vaddr_t addr; 2652 register_t soft_sr[16]; 2653 #endif 2654 #if defined (PMAP_OEA) || defined (PMAP_OEA_BRIDGE) 2655 struct bat soft_ibat[4]; 2656 struct bat soft_dbat[4]; 2657 #endif 2658 paddr_t sdr1; 2659 2660 #if defined (PMAP_OEA) || defined (PMAP_OEA_BRIDGE) 2661 cpuvers = MFPVR() >> 16; 2662 #endif 2663 __asm volatile ("mfsdr1 %0" : "=r"(sdr1)); 2664 #ifndef PMAP_OEA64 2665 addr = 0; 2666 for (i = 0; i < 16; i++) { 2667 soft_sr[i] = MFSRIN(addr); 2668 addr += (1 << ADDR_SR_SHFT); 2669 } 2670 #endif 2671 2672 #if defined (PMAP_OEA) || defined (PMAP_OEA_BRIDGE) 2673 /* read iBAT (601: uBAT) registers */ 2674 __asm volatile ("mfibatu %0,0" : "=r"(soft_ibat[0].batu)); 2675 __asm volatile ("mfibatl %0,0" : "=r"(soft_ibat[0].batl)); 2676 __asm volatile ("mfibatu %0,1" : "=r"(soft_ibat[1].batu)); 2677 __asm volatile ("mfibatl %0,1" : "=r"(soft_ibat[1].batl)); 2678 __asm volatile ("mfibatu %0,2" : "=r"(soft_ibat[2].batu)); 2679 __asm volatile ("mfibatl %0,2" : "=r"(soft_ibat[2].batl)); 2680 __asm volatile ("mfibatu %0,3" : "=r"(soft_ibat[3].batu)); 2681 __asm volatile ("mfibatl %0,3" : "=r"(soft_ibat[3].batl)); 2682 2683 2684 if (cpuvers != MPC601) { 2685 /* read dBAT registers */ 2686 __asm volatile ("mfdbatu %0,0" : "=r"(soft_dbat[0].batu)); 2687 __asm volatile ("mfdbatl %0,0" : "=r"(soft_dbat[0].batl)); 2688 __asm volatile ("mfdbatu %0,1" : "=r"(soft_dbat[1].batu)); 2689 __asm volatile ("mfdbatl %0,1" : "=r"(soft_dbat[1].batl)); 2690 __asm volatile ("mfdbatu %0,2" : "=r"(soft_dbat[2].batu)); 2691 __asm volatile ("mfdbatl %0,2" : "=r"(soft_dbat[2].batl)); 2692 __asm volatile ("mfdbatu %0,3" : "=r"(soft_dbat[3].batu)); 2693 __asm volatile ("mfdbatl %0,3" : "=r"(soft_dbat[3].batl)); 2694 } 2695 #endif 2696 2697 printf("SDR1:\t%#" _PRIxpa "\n", sdr1); 2698 #ifndef PMAP_OEA64 2699 printf("SR[]:\t"); 2700 for (i = 0; i < 4; i++) 2701 printf("0x%08lx, ", soft_sr[i]); 2702 printf("\n\t"); 2703 for ( ; i < 8; i++) 2704 printf("0x%08lx, ", soft_sr[i]); 2705 printf("\n\t"); 2706 for ( ; i < 12; i++) 2707 printf("0x%08lx, ", soft_sr[i]); 2708 printf("\n\t"); 2709 for ( ; i < 16; i++) 2710 printf("0x%08lx, ", soft_sr[i]); 2711 printf("\n"); 2712 #endif 2713 2714 #if defined(PMAP_OEA) || defined(PMAP_OEA_BRIDGE) 2715 printf("%cBAT[]:\t", cpuvers == MPC601 ? 
'u' : 'i'); 2716 for (i = 0; i < 4; i++) { 2717 printf("0x%08lx 0x%08lx, ", 2718 soft_ibat[i].batu, soft_ibat[i].batl); 2719 if (i == 1) 2720 printf("\n\t"); 2721 } 2722 if (cpuvers != MPC601) { 2723 printf("\ndBAT[]:\t"); 2724 for (i = 0; i < 4; i++) { 2725 printf("0x%08lx 0x%08lx, ", 2726 soft_dbat[i].batu, soft_dbat[i].batl); 2727 if (i == 1) 2728 printf("\n\t"); 2729 } 2730 } 2731 printf("\n"); 2732 #endif /* PMAP_OEA... */ 2733 } 2734 2735 void 2736 pmap_print_pte(pmap_t pm, vaddr_t va) 2737 { 2738 struct pvo_entry *pvo; 2739 volatile struct pte *pt; 2740 int pteidx; 2741 2742 pvo = pmap_pvo_find_va(pm, va, &pteidx); 2743 if (pvo != NULL) { 2744 pt = pmap_pvo_to_pte(pvo, pteidx); 2745 if (pt != NULL) { 2746 printf("VA %#" _PRIxva " -> %p -> %s %#" _PRIxpte ", %#" _PRIxpte "\n", 2747 va, pt, 2748 pt->pte_hi & PTE_HID ? "(sec)" : "(pri)", 2749 pt->pte_hi, pt->pte_lo); 2750 } else { 2751 printf("No valid PTE found\n"); 2752 } 2753 } else { 2754 printf("Address not in pmap\n"); 2755 } 2756 } 2757 2758 void 2759 pmap_pteg_dist(void) 2760 { 2761 struct pvo_entry *pvo; 2762 int ptegidx; 2763 int depth; 2764 int max_depth = 0; 2765 unsigned int depths[64]; 2766 2767 memset(depths, 0, sizeof(depths)); 2768 for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) { 2769 depth = 0; 2770 TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { 2771 depth++; 2772 } 2773 if (depth > max_depth) 2774 max_depth = depth; 2775 if (depth > 63) 2776 depth = 63; 2777 depths[depth]++; 2778 } 2779 2780 for (depth = 0; depth < 64; depth++) { 2781 printf(" [%2d]: %8u", depth, depths[depth]); 2782 if ((depth & 3) == 3) 2783 printf("\n"); 2784 if (depth == max_depth) 2785 break; 2786 } 2787 if ((depth & 3) != 3) 2788 printf("\n"); 2789 printf("Max depth found was %d\n", max_depth); 2790 } 2791 #endif /* DEBUG */ 2792 2793 #if defined(PMAPCHECK) || defined(DEBUG) 2794 void 2795 pmap_pvo_verify(void) 2796 { 2797 int ptegidx; 2798 int s; 2799 2800 s = splvm(); 2801 for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) { 2802 struct pvo_entry *pvo; 2803 TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { 2804 if ((uintptr_t) pvo >= SEGMENT_LENGTH) 2805 panic("pmap_pvo_verify: invalid pvo %p " 2806 "on list %#x", pvo, ptegidx); 2807 pmap_pvo_check(pvo); 2808 } 2809 } 2810 splx(s); 2811 } 2812 #endif /* PMAPCHECK */ 2813 2814 2815 void * 2816 pmap_pool_ualloc(struct pool *pp, int flags) 2817 { 2818 struct pvo_page *pvop; 2819 2820 if (uvm.page_init_done != true) { 2821 return (void *) uvm_pageboot_alloc(PAGE_SIZE); 2822 } 2823 2824 PMAP_LOCK(); 2825 pvop = SIMPLEQ_FIRST(&pmap_upvop_head); 2826 if (pvop != NULL) { 2827 pmap_upvop_free--; 2828 SIMPLEQ_REMOVE_HEAD(&pmap_upvop_head, pvop_link); 2829 PMAP_UNLOCK(); 2830 return pvop; 2831 } 2832 PMAP_UNLOCK(); 2833 return pmap_pool_malloc(pp, flags); 2834 } 2835 2836 void * 2837 pmap_pool_malloc(struct pool *pp, int flags) 2838 { 2839 struct pvo_page *pvop; 2840 struct vm_page *pg; 2841 2842 PMAP_LOCK(); 2843 pvop = SIMPLEQ_FIRST(&pmap_mpvop_head); 2844 if (pvop != NULL) { 2845 pmap_mpvop_free--; 2846 SIMPLEQ_REMOVE_HEAD(&pmap_mpvop_head, pvop_link); 2847 PMAP_UNLOCK(); 2848 return pvop; 2849 } 2850 PMAP_UNLOCK(); 2851 again: 2852 pg = uvm_pagealloc_strat(NULL, 0, NULL, UVM_PGA_USERESERVE, 2853 UVM_PGA_STRAT_ONLY, VM_FREELIST_FIRST256); 2854 if (__predict_false(pg == NULL)) { 2855 if (flags & PR_WAITOK) { 2856 uvm_wait("plpg"); 2857 goto again; 2858 } else { 2859 return (0); 2860 } 2861 } 2862 KDASSERT(VM_PAGE_TO_PHYS(pg) == (uintptr_t)VM_PAGE_TO_PHYS(pg)); 2863 return 
(void *)(uintptr_t) VM_PAGE_TO_PHYS(pg); 2864 } 2865 2866 void 2867 pmap_pool_ufree(struct pool *pp, void *va) 2868 { 2869 struct pvo_page *pvop; 2870 #if 0 2871 if (PHYS_TO_VM_PAGE((paddr_t) va) != NULL) { 2872 pmap_pool_mfree(va, size, tag); 2873 return; 2874 } 2875 #endif 2876 PMAP_LOCK(); 2877 pvop = va; 2878 SIMPLEQ_INSERT_HEAD(&pmap_upvop_head, pvop, pvop_link); 2879 pmap_upvop_free++; 2880 if (pmap_upvop_free > pmap_upvop_maxfree) 2881 pmap_upvop_maxfree = pmap_upvop_free; 2882 PMAP_UNLOCK(); 2883 } 2884 2885 void 2886 pmap_pool_mfree(struct pool *pp, void *va) 2887 { 2888 struct pvo_page *pvop; 2889 2890 PMAP_LOCK(); 2891 pvop = va; 2892 SIMPLEQ_INSERT_HEAD(&pmap_mpvop_head, pvop, pvop_link); 2893 pmap_mpvop_free++; 2894 if (pmap_mpvop_free > pmap_mpvop_maxfree) 2895 pmap_mpvop_maxfree = pmap_mpvop_free; 2896 PMAP_UNLOCK(); 2897 #if 0 2898 uvm_pagefree(PHYS_TO_VM_PAGE((paddr_t) va)); 2899 #endif 2900 } 2901 2902 /* 2903 * This routine in bootstraping to steal to-be-managed memory (which will 2904 * then be unmanaged). We use it to grab from the first 256MB for our 2905 * pmap needs and above 256MB for other stuff. 2906 */ 2907 vaddr_t 2908 pmap_steal_memory(vsize_t vsize, vaddr_t *vstartp, vaddr_t *vendp) 2909 { 2910 vsize_t size; 2911 vaddr_t va; 2912 paddr_t pa = 0; 2913 int npgs, bank; 2914 struct vm_physseg *ps; 2915 2916 if (uvm.page_init_done == true) 2917 panic("pmap_steal_memory: called _after_ bootstrap"); 2918 2919 *vstartp = VM_MIN_KERNEL_ADDRESS; 2920 *vendp = VM_MAX_KERNEL_ADDRESS; 2921 2922 size = round_page(vsize); 2923 npgs = atop(size); 2924 2925 /* 2926 * PA 0 will never be among those given to UVM so we can use it 2927 * to indicate we couldn't steal any memory. 2928 */ 2929 for (bank = 0; bank < vm_nphysseg; bank++) { 2930 ps = VM_PHYSMEM_PTR(bank); 2931 if (ps->free_list == VM_FREELIST_FIRST256 && 2932 ps->avail_end - ps->avail_start >= npgs) { 2933 pa = ptoa(ps->avail_start); 2934 break; 2935 } 2936 } 2937 2938 if (pa == 0) 2939 panic("pmap_steal_memory: no approriate memory to steal!"); 2940 2941 ps->avail_start += npgs; 2942 ps->start += npgs; 2943 2944 /* 2945 * If we've used up all the pages in the segment, remove it and 2946 * compact the list. 2947 */ 2948 if (ps->avail_start == ps->end) { 2949 /* 2950 * If this was the last one, then a very bad thing has occurred 2951 */ 2952 if (--vm_nphysseg == 0) 2953 panic("pmap_steal_memory: out of memory!"); 2954 2955 printf("pmap_steal_memory: consumed bank %d\n", bank); 2956 for (; bank < vm_nphysseg; bank++, ps++) { 2957 ps[0] = ps[1]; 2958 } 2959 } 2960 2961 va = (vaddr_t) pa; 2962 memset((void *) va, 0, size); 2963 pmap_pages_stolen += npgs; 2964 #ifdef DEBUG 2965 if (pmapdebug && npgs > 1) { 2966 u_int cnt = 0; 2967 for (bank = 0; bank < vm_nphysseg; bank++) { 2968 ps = VM_PHYSMEM_PTR(bank); 2969 cnt += ps->avail_end - ps->avail_start; 2970 } 2971 printf("pmap_steal_memory: stole %u (total %u) pages (%u left)\n", 2972 npgs, pmap_pages_stolen, cnt); 2973 } 2974 #endif 2975 2976 return va; 2977 } 2978 2979 /* 2980 * Find a chuck of memory with right size and alignment. 
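 * The chunk is carved straight out of the avail[] region list maintained
 * by pmap_bootstrap(), so whatever is returned here is never handed to UVM
 * later; it is effectively only usable during bootstrap, before
 * uvm_page_physload() runs.  A typical caller is the PTEG table allocation
 * further down, e.g.
 *
 *	size = pmap_pteg_cnt * sizeof(struct pteg);
 *	pmap_pteg_table = (void *)(uintptr_t)
 *	    pmap_boot_find_memory(size, size, 0);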
2981 */ 2982 paddr_t 2983 pmap_boot_find_memory(psize_t size, psize_t alignment, int at_end) 2984 { 2985 struct mem_region *mp; 2986 paddr_t s, e; 2987 int i, j; 2988 2989 size = round_page(size); 2990 2991 DPRINTFN(BOOT, 2992 "pmap_boot_find_memory: size=%#" _PRIxpa ", alignment=%#" _PRIxpa ", at_end=%d", 2993 size, alignment, at_end); 2994 2995 if (alignment < PAGE_SIZE || (alignment & (alignment-1)) != 0) 2996 panic("pmap_boot_find_memory: invalid alignment %#" _PRIxpa, 2997 alignment); 2998 2999 if (at_end) { 3000 if (alignment != PAGE_SIZE) 3001 panic("pmap_boot_find_memory: invalid ending " 3002 "alignment %#" _PRIxpa, alignment); 3003 3004 for (mp = &avail[avail_cnt-1]; mp >= avail; mp--) { 3005 s = mp->start + mp->size - size; 3006 if (s >= mp->start && mp->size >= size) { 3007 DPRINTFN(BOOT, ": %#" _PRIxpa "\n", s); 3008 DPRINTFN(BOOT, 3009 "pmap_boot_find_memory: b-avail[%d] start " 3010 "%#" _PRIxpa " size %#" _PRIxpa "\n", mp - avail, 3011 mp->start, mp->size); 3012 mp->size -= size; 3013 DPRINTFN(BOOT, 3014 "pmap_boot_find_memory: a-avail[%d] start " 3015 "%#" _PRIxpa " size %#" _PRIxpa "\n", mp - avail, 3016 mp->start, mp->size); 3017 return s; 3018 } 3019 } 3020 panic("pmap_boot_find_memory: no available memory"); 3021 } 3022 3023 for (mp = avail, i = 0; i < avail_cnt; i++, mp++) { 3024 s = (mp->start + alignment - 1) & ~(alignment-1); 3025 e = s + size; 3026 3027 /* 3028 * Does the requested block fit entirely within this region? 3029 */ 3030 if (s < mp->start || e > mp->start + mp->size) 3031 continue; 3032 3033 DPRINTFN(BOOT, ": %#" _PRIxpa "\n", s); 3034 if (s == mp->start) { 3035 /* 3036 * If the block starts at the beginning of the region, 3037 * adjust the size & start. (the region may now be 3038 * zero in length) 3039 */ 3040 DPRINTFN(BOOT, 3041 "pmap_boot_find_memory: b-avail[%d] start " 3042 "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size); 3043 mp->start += size; 3044 mp->size -= size; 3045 DPRINTFN(BOOT, 3046 "pmap_boot_find_memory: a-avail[%d] start " 3047 "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size); 3048 } else if (e == mp->start + mp->size) { 3049 /* 3050 * If the block ends at the end of the region, 3051 * adjust only the size. 3052 */ 3053 DPRINTFN(BOOT, 3054 "pmap_boot_find_memory: b-avail[%d] start " 3055 "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size); 3056 mp->size -= size; 3057 DPRINTFN(BOOT, 3058 "pmap_boot_find_memory: a-avail[%d] start " 3059 "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size); 3060 } else { 3061 /* 3062 * Block is in the middle of the region, so we 3063 * have to split it in two.
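 * For example (illustrative numbers only): carving [s, e) out of an entry
 * that covers [0x100000, 0x800000) leaves two entries, [0x100000, s) and
 * [e, 0x800000); the entries above it are shifted up one slot to make room
 * and avail_cnt grows by one.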
3064 */ 3065 for (j = avail_cnt; j > i + 1; j--) { 3066 avail[j] = avail[j-1]; 3067 } 3068 DPRINTFN(BOOT, 3069 "pmap_boot_find_memory: b-avail[%d] start " 3070 "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size); 3071 mp[1].start = e; 3072 mp[1].size = mp[0].start + mp[0].size - e; 3073 mp[0].size = s - mp[0].start; 3074 avail_cnt++; 3075 for (; i < avail_cnt; i++) { 3076 DPRINTFN(BOOT, 3077 "pmap_boot_find_memory: a-avail[%d] " 3078 "start %#" _PRIxpa " size %#" _PRIxpa "\n", i, 3079 avail[i].start, avail[i].size); 3080 } 3081 } 3082 KASSERT(s == (uintptr_t) s); 3083 return s; 3084 } 3085 panic("pmap_boot_find_memory: not enough memory for " 3086 "%#" _PRIxpa "/%#" _PRIxpa " allocation?", size, alignment); 3087 } 3088 3089 /* XXXSL: we dont have any BATs to do this, map in Segment 0 1:1 using page tables */ 3090 #if defined (PMAP_OEA64_BRIDGE) 3091 int 3092 pmap_setup_segment0_map(int use_large_pages, ...) 3093 { 3094 vaddr_t va, va_end; 3095 3096 register_t pte_lo = 0x0; 3097 int ptegidx = 0; 3098 struct pte pte; 3099 va_list ap; 3100 3101 /* Coherent + Supervisor RW, no user access */ 3102 pte_lo = PTE_M; 3103 3104 /* XXXSL 3105 * Map in 1st segment 1:1, we'll be careful not to spill kernel entries later, 3106 * these have to take priority. 3107 */ 3108 for (va = 0x0; va < SEGMENT_LENGTH; va += 0x1000) { 3109 ptegidx = va_to_pteg(pmap_kernel(), va); 3110 pmap_pte_create(&pte, pmap_kernel(), va, va | pte_lo); 3111 (void)pmap_pte_insert(ptegidx, &pte); 3112 } 3113 3114 va_start(ap, use_large_pages); 3115 while (1) { 3116 paddr_t pa; 3117 size_t size; 3118 3119 va = va_arg(ap, vaddr_t); 3120 3121 if (va == 0) 3122 break; 3123 3124 pa = va_arg(ap, paddr_t); 3125 size = va_arg(ap, size_t); 3126 3127 for (va_end = va + size; va < va_end; va += 0x1000, pa += 0x1000) { 3128 #if 0 3129 printf("%s: Inserting: va: %#" _PRIxva ", pa: %#" _PRIxpa "\n", __func__, va, pa); 3130 #endif 3131 ptegidx = va_to_pteg(pmap_kernel(), va); 3132 pmap_pte_create(&pte, pmap_kernel(), va, pa | pte_lo); 3133 (void)pmap_pte_insert(ptegidx, &pte); 3134 } 3135 } 3136 3137 TLBSYNC(); 3138 SYNC(); 3139 return (0); 3140 } 3141 #endif /* PMAP_OEA64_BRIDGE */ 3142 3143 /* 3144 * This is not part of the defined PMAP interface and is specific to the 3145 * PowerPC architecture. This is called during initppc, before the system 3146 * is really initialized. 3147 */ 3148 void 3149 pmap_bootstrap(paddr_t kernelstart, paddr_t kernelend) 3150 { 3151 struct mem_region *mp, tmp; 3152 paddr_t s, e; 3153 psize_t size; 3154 int i, j; 3155 3156 /* 3157 * Get memory. 3158 */ 3159 mem_regions(&mem, &avail); 3160 #if defined(DEBUG) 3161 if (pmapdebug & PMAPDEBUG_BOOT) { 3162 printf("pmap_bootstrap: memory configuration:\n"); 3163 for (mp = mem; mp->size; mp++) { 3164 printf("pmap_bootstrap: mem start %#" _PRIxpa " size %#" _PRIxpa "\n", 3165 mp->start, mp->size); 3166 } 3167 for (mp = avail; mp->size; mp++) { 3168 printf("pmap_bootstrap: avail start %#" _PRIxpa " size %#" _PRIxpa "\n", 3169 mp->start, mp->size); 3170 } 3171 } 3172 #endif 3173 3174 /* 3175 * Find out how much physical memory we have and in how many chunks. 3176 */ 3177 for (mem_cnt = 0, mp = mem; mp->size; mp++) { 3178 if (mp->start >= pmap_memlimit) 3179 continue; 3180 if (mp->start + mp->size > pmap_memlimit) { 3181 size = pmap_memlimit - mp->start; 3182 physmem += btoc(size); 3183 } else { 3184 physmem += btoc(mp->size); 3185 } 3186 mem_cnt++; 3187 } 3188 3189 /* 3190 * Count the number of available entries. 
3191 */ 3192 for (avail_cnt = 0, mp = avail; mp->size; mp++) 3193 avail_cnt++; 3194 3195 /* 3196 * Page align all regions. 3197 */ 3198 kernelstart = trunc_page(kernelstart); 3199 kernelend = round_page(kernelend); 3200 for (mp = avail, i = 0; i < avail_cnt; i++, mp++) { 3201 s = round_page(mp->start); 3202 mp->size -= (s - mp->start); 3203 mp->size = trunc_page(mp->size); 3204 mp->start = s; 3205 e = mp->start + mp->size; 3206 3207 DPRINTFN(BOOT, 3208 "pmap_bootstrap: b-avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n", 3209 i, mp->start, mp->size); 3210 3211 /* 3212 * Don't allow the end to run beyond our artificial limit 3213 */ 3214 if (e > pmap_memlimit) 3215 e = pmap_memlimit; 3216 3217 /* 3218 * Is this region empty or strange? skip it. 3219 */ 3220 if (e <= s) { 3221 mp->start = 0; 3222 mp->size = 0; 3223 continue; 3224 } 3225 3226 /* 3227 * Does this overlap the beginning of kernel? 3228 * Does extend past the end of the kernel? 3229 */ 3230 else if (s < kernelstart && e > kernelstart) { 3231 if (e > kernelend) { 3232 avail[avail_cnt].start = kernelend; 3233 avail[avail_cnt].size = e - kernelend; 3234 avail_cnt++; 3235 } 3236 mp->size = kernelstart - s; 3237 } 3238 /* 3239 * Check whether this region overlaps the end of the kernel. 3240 */ 3241 else if (s < kernelend && e > kernelend) { 3242 mp->start = kernelend; 3243 mp->size = e - kernelend; 3244 } 3245 /* 3246 * Look whether this regions is completely inside the kernel. 3247 * Nuke it if it does. 3248 */ 3249 else if (s >= kernelstart && e <= kernelend) { 3250 mp->start = 0; 3251 mp->size = 0; 3252 } 3253 /* 3254 * If the user imposed a memory limit, enforce it. 3255 */ 3256 else if (s >= pmap_memlimit) { 3257 mp->start = -PAGE_SIZE; /* let's know why */ 3258 mp->size = 0; 3259 } 3260 else { 3261 mp->start = s; 3262 mp->size = e - s; 3263 } 3264 DPRINTFN(BOOT, 3265 "pmap_bootstrap: a-avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n", 3266 i, mp->start, mp->size); 3267 } 3268 3269 /* 3270 * Move (and uncount) all the null return to the end. 3271 */ 3272 for (mp = avail, i = 0; i < avail_cnt; i++, mp++) { 3273 if (mp->size == 0) { 3274 tmp = avail[i]; 3275 avail[i] = avail[--avail_cnt]; 3276 avail[avail_cnt] = avail[i]; 3277 } 3278 } 3279 3280 /* 3281 * (Bubble)sort them into ascending order. 3282 */ 3283 for (i = 0; i < avail_cnt; i++) { 3284 for (j = i + 1; j < avail_cnt; j++) { 3285 if (avail[i].start > avail[j].start) { 3286 tmp = avail[i]; 3287 avail[i] = avail[j]; 3288 avail[j] = tmp; 3289 } 3290 } 3291 } 3292 3293 /* 3294 * Make sure they don't overlap. 3295 */ 3296 for (mp = avail, i = 0; i < avail_cnt - 1; i++, mp++) { 3297 if (mp[0].start + mp[0].size > mp[1].start) { 3298 mp[0].size = mp[1].start - mp[0].start; 3299 } 3300 DPRINTFN(BOOT, 3301 "pmap_bootstrap: avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n", 3302 i, mp->start, mp->size); 3303 } 3304 DPRINTFN(BOOT, 3305 "pmap_bootstrap: avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n", 3306 i, mp->start, mp->size); 3307 3308 #ifdef PTEGCOUNT 3309 pmap_pteg_cnt = PTEGCOUNT; 3310 #else /* PTEGCOUNT */ 3311 3312 pmap_pteg_cnt = 0x1000; 3313 3314 while (pmap_pteg_cnt < physmem) 3315 pmap_pteg_cnt <<= 1; 3316 3317 pmap_pteg_cnt >>= 1; 3318 #endif /* PTEGCOUNT */ 3319 3320 #ifdef DEBUG 3321 DPRINTFN(BOOT, "pmap_pteg_cnt: 0x%x\n", pmap_pteg_cnt); 3322 #endif 3323 3324 /* 3325 * Find suitably aligned memory for PTEG hash table. 
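 * The table must be naturally aligned to its own (power-of-two) size,
 * because SDR1 locates it as a base address plus a mask; that is why the
 * size is passed as both the size and the alignment argument below.  For
 * instance, with the 32-bit OEA layout of 64-byte PTEGs, 0x8000 PTEGs give
 * a 2MB table on a 2MB boundary.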
3326 */ 3327 size = pmap_pteg_cnt * sizeof(struct pteg); 3328 pmap_pteg_table = (void *)(uintptr_t) pmap_boot_find_memory(size, size, 0); 3329 3330 #ifdef DEBUG 3331 DPRINTFN(BOOT, 3332 "PTEG cnt: 0x%x HTAB size: 0x%08x bytes, address: %p\n", pmap_pteg_cnt, (unsigned int)size, pmap_pteg_table); 3333 #endif 3334 3335 3336 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 3337 if ( (uintptr_t) pmap_pteg_table + size > SEGMENT_LENGTH) 3338 panic("pmap_bootstrap: pmap_pteg_table end (%p + %#" _PRIxpa ") > 256MB", 3339 pmap_pteg_table, size); 3340 #endif 3341 3342 memset(__UNVOLATILE(pmap_pteg_table), 0, 3343 pmap_pteg_cnt * sizeof(struct pteg)); 3344 pmap_pteg_mask = pmap_pteg_cnt - 1; 3345 3346 /* 3347 * We cannot do pmap_steal_memory here since UVM hasn't been loaded 3348 * with pages. So we just steal them before giving them to UVM. 3349 */ 3350 size = sizeof(pmap_pvo_table[0]) * pmap_pteg_cnt; 3351 pmap_pvo_table = (void *)(uintptr_t) pmap_boot_find_memory(size, PAGE_SIZE, 0); 3352 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 3353 if ( (uintptr_t) pmap_pvo_table + size > SEGMENT_LENGTH) 3354 panic("pmap_bootstrap: pmap_pvo_table end (%p + %#" _PRIxpa ") > 256MB", 3355 pmap_pvo_table, size); 3356 #endif 3357 3358 for (i = 0; i < pmap_pteg_cnt; i++) 3359 TAILQ_INIT(&pmap_pvo_table[i]); 3360 3361 #ifndef MSGBUFADDR 3362 /* 3363 * Allocate msgbuf in high memory. 3364 */ 3365 msgbuf_paddr = pmap_boot_find_memory(MSGBUFSIZE, PAGE_SIZE, 1); 3366 #endif 3367 3368 for (mp = avail, i = 0; i < avail_cnt; mp++, i++) { 3369 paddr_t pfstart = atop(mp->start); 3370 paddr_t pfend = atop(mp->start + mp->size); 3371 if (mp->size == 0) 3372 continue; 3373 if (mp->start + mp->size <= SEGMENT_LENGTH) { 3374 uvm_page_physload(pfstart, pfend, pfstart, pfend, 3375 VM_FREELIST_FIRST256); 3376 } else if (mp->start >= SEGMENT_LENGTH) { 3377 uvm_page_physload(pfstart, pfend, pfstart, pfend, 3378 VM_FREELIST_DEFAULT); 3379 } else { 3380 pfend = atop(SEGMENT_LENGTH); 3381 uvm_page_physload(pfstart, pfend, pfstart, pfend, 3382 VM_FREELIST_FIRST256); 3383 pfstart = atop(SEGMENT_LENGTH); 3384 pfend = atop(mp->start + mp->size); 3385 uvm_page_physload(pfstart, pfend, pfstart, pfend, 3386 VM_FREELIST_DEFAULT); 3387 } 3388 } 3389 3390 /* 3391 * Make sure kernel vsid is allocated as well as VSID 0. 3392 */ 3393 pmap_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS-1)) / VSID_NBPW] 3394 |= 1 << (KERNEL_VSIDBITS % VSID_NBPW); 3395 pmap_vsid_bitmap[(PHYSMAP_VSIDBITS & (NPMAPS-1)) / VSID_NBPW] 3396 |= 1 << (PHYSMAP_VSIDBITS % VSID_NBPW); 3397 pmap_vsid_bitmap[0] |= 1; 3398 3399 /* 3400 * Initialize kernel pmap and hardware. 
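 * From here on the segment registers (and their software copies in
 * pm_sr[]) are loaded for the kernel pmap, SDR1 is pointed at the PTEG
 * table allocated above and the TLB is flushed, so the MMU can resolve
 * kernel addresses through the hash table.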
3401 */ 3402 3403 /* PMAP_OEA64_BRIDGE does support these instructions */ 3404 #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE) 3405 for (i = 0; i < 16; i++) { 3406 #if defined(PPC_OEA601) 3407 /* XXX wedges for segment register 0xf , so set later */ 3408 if ((iosrtable[i] & SR601_T) && ((MFPVR() >> 16) == MPC601)) 3409 continue; 3410 #endif 3411 pmap_kernel()->pm_sr[i] = KERNELN_SEGMENT(i)|SR_PRKEY; 3412 __asm volatile ("mtsrin %0,%1" 3413 :: "r"(KERNELN_SEGMENT(i)|SR_PRKEY), "r"(i << ADDR_SR_SHFT)); 3414 } 3415 3416 pmap_kernel()->pm_sr[KERNEL_SR] = KERNEL_SEGMENT|SR_SUKEY|SR_PRKEY; 3417 __asm volatile ("mtsr %0,%1" 3418 :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT)); 3419 #ifdef KERNEL2_SR 3420 pmap_kernel()->pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT|SR_SUKEY|SR_PRKEY; 3421 __asm volatile ("mtsr %0,%1" 3422 :: "n"(KERNEL2_SR), "r"(KERNEL2_SEGMENT)); 3423 #endif 3424 #endif /* PMAP_OEA || PMAP_OEA64_BRIDGE */ 3425 #if defined (PMAP_OEA) 3426 for (i = 0; i < 16; i++) { 3427 if (iosrtable[i] & SR601_T) { 3428 pmap_kernel()->pm_sr[i] = iosrtable[i]; 3429 __asm volatile ("mtsrin %0,%1" 3430 :: "r"(iosrtable[i]), "r"(i << ADDR_SR_SHFT)); 3431 } 3432 } 3433 __asm volatile ("sync; mtsdr1 %0; isync" 3434 :: "r"((uintptr_t)pmap_pteg_table | (pmap_pteg_mask >> 10))); 3435 #elif defined (PMAP_OEA64) || defined (PMAP_OEA64_BRIDGE) 3436 __asm __volatile ("sync; mtsdr1 %0; isync" 3437 :: "r"((uintptr_t)pmap_pteg_table | (32 - __builtin_clz(pmap_pteg_mask >> 11)))); 3438 #endif 3439 tlbia(); 3440 3441 #ifdef ALTIVEC 3442 pmap_use_altivec = cpu_altivec; 3443 #endif 3444 3445 #ifdef DEBUG 3446 if (pmapdebug & PMAPDEBUG_BOOT) { 3447 u_int cnt; 3448 int bank; 3449 char pbuf[9]; 3450 for (cnt = 0, bank = 0; bank < vm_nphysseg; bank++) { 3451 cnt += VM_PHYSMEM_PTR(bank)->avail_end - VM_PHYSMEM_PTR(bank)->avail_start; 3452 printf("pmap_bootstrap: vm_physmem[%d]=%#" _PRIxpa "-%#" _PRIxpa "/%#" _PRIxpa "\n", 3453 bank, 3454 ptoa(VM_PHYSMEM_PTR(bank)->avail_start), 3455 ptoa(VM_PHYSMEM_PTR(bank)->avail_end), 3456 ptoa(VM_PHYSMEM_PTR(bank)->avail_end - VM_PHYSMEM_PTR(bank)->avail_start)); 3457 } 3458 format_bytes(pbuf, sizeof(pbuf), ptoa((u_int64_t) cnt)); 3459 printf("pmap_bootstrap: UVM memory = %s (%u pages)\n", 3460 pbuf, cnt); 3461 } 3462 #endif 3463 3464 pool_init(&pmap_upvo_pool, sizeof(struct pvo_entry), 3465 sizeof(struct pvo_entry), 0, 0, "pmap_upvopl", 3466 &pmap_pool_uallocator, IPL_VM); 3467 3468 pool_setlowat(&pmap_upvo_pool, 252); 3469 3470 pool_init(&pmap_pool, sizeof(struct pmap), 3471 sizeof(void *), 0, 0, "pmap_pl", &pmap_pool_uallocator, 3472 IPL_NONE); 3473 3474 #if defined(PMAP_NEED_MAPKERNEL) 3475 { 3476 struct pmap *pm = pmap_kernel(); 3477 #if defined(PMAP_NEED_FULL_MAPKERNEL) 3478 extern int etext[], kernel_text[]; 3479 vaddr_t va, va_etext = (paddr_t) etext; 3480 #endif 3481 paddr_t pa, pa_end; 3482 register_t sr; 3483 struct pte pt; 3484 unsigned int ptegidx; 3485 int bank; 3486 3487 sr = PHYSMAPN_SEGMENT(0) | SR_SUKEY|SR_PRKEY; 3488 pm->pm_sr[0] = sr; 3489 3490 for (bank = 0; bank < vm_nphysseg; bank++) { 3491 pa_end = ptoa(VM_PHYSMEM_PTR(bank)->avail_end); 3492 pa = ptoa(VM_PHYSMEM_PTR(bank)->avail_start); 3493 for (; pa < pa_end; pa += PAGE_SIZE) { 3494 ptegidx = va_to_pteg(pm, pa); 3495 pmap_pte_create(&pt, pm, pa, pa | PTE_M|PTE_BW); 3496 pmap_pte_insert(ptegidx, &pt); 3497 } 3498 } 3499 3500 #if defined(PMAP_NEED_FULL_MAPKERNEL) 3501 va = (vaddr_t) kernel_text; 3502 3503 for (pa = kernelstart; va < va_etext; 3504 pa += PAGE_SIZE, va += PAGE_SIZE) { 3505 ptegidx = va_to_pteg(pm, va); 3506 
pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BR); 3507 pmap_pte_insert(ptegidx, &pt); 3508 } 3509 3510 for (; pa < kernelend; 3511 pa += PAGE_SIZE, va += PAGE_SIZE) { 3512 ptegidx = va_to_pteg(pm, va); 3513 pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BW); 3514 pmap_pte_insert(ptegidx, &pt); 3515 } 3516 3517 for (va = 0, pa = 0; va < kernelstart; 3518 pa += PAGE_SIZE, va += PAGE_SIZE) { 3519 ptegidx = va_to_pteg(pm, va); 3520 if (va < 0x3000) 3521 pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BR); 3522 else 3523 pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BW); 3524 pmap_pte_insert(ptegidx, &pt); 3525 } 3526 for (va = kernelend, pa = kernelend; va < SEGMENT_LENGTH; 3527 pa += PAGE_SIZE, va += PAGE_SIZE) { 3528 ptegidx = va_to_pteg(pm, va); 3529 pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BW); 3530 pmap_pte_insert(ptegidx, &pt); 3531 } 3532 #endif 3533 3534 __asm volatile ("mtsrin %0,%1" 3535 :: "r"(sr), "r"(kernelstart)); 3536 } 3537 #endif 3538 3539 #if defined(PMAPDEBUG) 3540 if ( pmapdebug ) 3541 pmap_print_mmuregs(); 3542 #endif 3543 } 3544
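
#if 0
/*
 * Illustrative only, deliberately not compiled: the minimal sequence a
 * caller would use to wire one physical page into the kernel map with the
 * routines above and then undo it.  The names example_va and example_pa
 * are placeholders, not symbols defined anywhere in the tree.
 */
static void
pmap_example_wire_one_page(vaddr_t example_va, paddr_t example_pa)
{

	/* Enter a wired, writable kernel mapping; this cannot fail. */
	pmap_kenter_pa(example_va, example_pa,
	    VM_PROT_READ | VM_PROT_WRITE, 0);
	pmap_update(pmap_kernel());

	/* ... use the mapping ... */

	/* Tear the mapping down again. */
	pmap_kremove(example_va, PAGE_SIZE);
	pmap_update(pmap_kernel());
}
#endif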