/*	$NetBSD: pmap.c,v 1.75 2011/01/18 01:02:55 matt Exp $	*/
/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Support for PPC64 Bridge mode added by Sanjay Lal <sanjayl@kymasys.com>
 * of Kyma Systems LLC.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.75 2011/01/18 01:02:55 matt Exp $");

#define	PMAP_NOOPNAMES

#include "opt_ppcarch.h"
#include "opt_altivec.h"
#include "opt_multiprocessor.h"
#include "opt_pmap.h"

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/device.h>		/* for evcnt */
#include <sys/systm.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>

#include <machine/pcb.h>
#include <machine/powerpc.h>
#include <powerpc/spr.h>
#include <powerpc/bat.h>
#include <powerpc/stdarg.h>
#include <powerpc/oea/spr.h>
#include <powerpc/oea/sr_601.h>

#ifdef ALTIVEC
int pmap_use_altivec;
#endif

volatile struct pteg *pmap_pteg_table;
unsigned int pmap_pteg_cnt;
unsigned int pmap_pteg_mask;
#ifdef PMAP_MEMLIMIT
static paddr_t pmap_memlimit = PMAP_MEMLIMIT;
#else
static paddr_t pmap_memlimit = -PAGE_SIZE;	/* there is no limit */
#endif

struct pmap kernel_pmap_;
unsigned int pmap_pages_stolen;
u_long pmap_pte_valid;
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
u_long pmap_pvo_enter_depth;
u_long pmap_pvo_remove_depth;
#endif

#ifndef MSGBUFADDR
extern paddr_t msgbuf_paddr;
#endif

static struct mem_region *mem, *avail;
static u_int mem_cnt, avail_cnt;

#if !defined(PMAP_OEA64) && !defined(PMAP_OEA64_BRIDGE)
# define	PMAP_OEA 1
# if defined(PMAP_EXCLUDE_DECLS) && !defined(PPC_OEA64) && !defined(PPC_OEA64_BRIDGE)
#  define PMAPNAME(name)	pmap_##name
# endif
#endif

#if defined(PMAP_OEA64)
# if defined(PMAP_EXCLUDE_DECLS) && !defined(PPC_OEA) && !defined(PPC_OEA64_BRIDGE)
#  define PMAPNAME(name)	pmap_##name
# endif
#endif

#if defined(PMAP_OEA64_BRIDGE)
# if defined(PMAP_EXCLUDE_DECLS) && !defined(PPC_OEA) && !defined(PPC_OEA64)
#  define PMAPNAME(name)	pmap_##name
# endif
#endif

#if defined(PMAP_OEA)
#define	_PRIxpte	"lx"
#else
#define	_PRIxpte	PRIx64
#endif
#define	_PRIxpa		"lx"
#define	_PRIxva		"lx"
#define	_PRIsr		"lx"

#if defined(PMAP_EXCLUDE_DECLS) && !defined(PMAPNAME)
#if defined(PMAP_OEA)
#define	PMAPNAME(name)	pmap32_##name
#elif defined(PMAP_OEA64)
#define	PMAPNAME(name)	pmap64_##name
#elif defined(PMAP_OEA64_BRIDGE)
#define	PMAPNAME(name)	pmap64bridge_##name
#else
#error unknown variant for pmap
#endif
#endif /* PMAP_EXCLUDE_DECLS && !PMAPNAME */

#if defined(PMAPNAME)
#define	STATIC			static
#define pmap_pte_spill		PMAPNAME(pte_spill)
#define pmap_real_memory	PMAPNAME(real_memory)
#define pmap_init		PMAPNAME(init)
#define pmap_virtual_space	PMAPNAME(virtual_space)
#define pmap_create		PMAPNAME(create)
#define pmap_reference		PMAPNAME(reference)
#define pmap_destroy		PMAPNAME(destroy)
#define pmap_copy		PMAPNAME(copy)
#define pmap_update		PMAPNAME(update)
#define pmap_enter		PMAPNAME(enter)
#define pmap_remove		PMAPNAME(remove)
#define pmap_kenter_pa		PMAPNAME(kenter_pa)
#define pmap_kremove		PMAPNAME(kremove)
#define pmap_extract		PMAPNAME(extract)
#define pmap_protect		PMAPNAME(protect)
#define pmap_unwire		PMAPNAME(unwire)
#define pmap_page_protect	PMAPNAME(page_protect)
#define pmap_query_bit		PMAPNAME(query_bit)
#define pmap_clear_bit		PMAPNAME(clear_bit)

#define pmap_activate		PMAPNAME(activate)
#define pmap_deactivate		PMAPNAME(deactivate)

#define pmap_pinit		PMAPNAME(pinit)
#define pmap_procwr		PMAPNAME(procwr)

#if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
#define pmap_pte_print		PMAPNAME(pte_print)
#define pmap_pteg_check		PMAPNAME(pteg_check)
#define pmap_print_mmuregs	PMAPNAME(print_mmuregs)
#define pmap_print_pte		PMAPNAME(print_pte)
#define pmap_pteg_dist		PMAPNAME(pteg_dist)
#endif
#if defined(DEBUG) || defined(PMAPCHECK)
#define pmap_pvo_verify		PMAPNAME(pvo_verify)
#define pmapcheck		PMAPNAME(check)
#endif
#if defined(DEBUG) || defined(PMAPDEBUG)
#define pmapdebug		PMAPNAME(debug)
#endif
#define pmap_steal_memory	PMAPNAME(steal_memory)
#define pmap_bootstrap		PMAPNAME(bootstrap)
#else
#define STATIC			/* nothing */
#endif /* PMAPNAME */

STATIC int pmap_pte_spill(struct pmap *, vaddr_t, bool);
STATIC void pmap_real_memory(paddr_t *, psize_t *);
STATIC void pmap_init(void);
STATIC void pmap_virtual_space(vaddr_t *, vaddr_t *);
STATIC pmap_t pmap_create(void);
STATIC void pmap_reference(pmap_t);
STATIC void pmap_destroy(pmap_t);
STATIC void pmap_copy(pmap_t, pmap_t, vaddr_t, vsize_t, vaddr_t);
STATIC void pmap_update(pmap_t);
STATIC int pmap_enter(pmap_t, vaddr_t, paddr_t, vm_prot_t, u_int);
STATIC void pmap_remove(pmap_t, vaddr_t, vaddr_t);
STATIC void pmap_kenter_pa(vaddr_t, paddr_t, vm_prot_t, u_int);
STATIC void pmap_kremove(vaddr_t, vsize_t);
STATIC bool pmap_extract(pmap_t, vaddr_t, paddr_t *);

STATIC void pmap_protect(pmap_t, vaddr_t, vaddr_t, vm_prot_t);
STATIC void pmap_unwire(pmap_t, vaddr_t);
STATIC void pmap_page_protect(struct vm_page *, vm_prot_t);
STATIC bool pmap_query_bit(struct vm_page *, int);
STATIC bool pmap_clear_bit(struct vm_page *, int);

STATIC void pmap_activate(struct lwp *);
STATIC void pmap_deactivate(struct lwp *);

STATIC void pmap_pinit(pmap_t pm);
STATIC void pmap_procwr(struct proc *, vaddr_t, size_t);

#if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
STATIC void pmap_pte_print(volatile struct pte *);
STATIC void pmap_pteg_check(void);
STATIC void pmap_print_mmuregs(void);
STATIC void pmap_print_pte(pmap_t, vaddr_t);
STATIC void pmap_pteg_dist(void);
#endif
#if defined(DEBUG) || defined(PMAPCHECK)
STATIC void pmap_pvo_verify(void);
#endif
STATIC vaddr_t pmap_steal_memory(vsize_t, vaddr_t *, vaddr_t *);
STATIC void pmap_bootstrap(paddr_t, paddr_t);
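/*
 * Illustrative note on the PMAPNAME() indirection: when PMAP_EXCLUDE_DECLS
 * is in effect, every entry point above is renamed to a variant-specific
 * symbol (pmap32_*, pmap64_* or pmap64bridge_*) and exported through the
 * pmap_ops table below, e.g.
 *
 *	pmap_enter  ->  pmap32_enter, reached via pmapops->pmapop_enter
 *
 * so one kernel can carry more than one OEA pmap variant and select one at
 * run time.  With a single variant, PMAPNAME(x) is simply pmap_x.
 */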
#ifdef PMAPNAME
const struct pmap_ops PMAPNAME(ops) = {
	.pmapop_pte_spill = pmap_pte_spill,
	.pmapop_real_memory = pmap_real_memory,
	.pmapop_init = pmap_init,
	.pmapop_virtual_space = pmap_virtual_space,
	.pmapop_create = pmap_create,
	.pmapop_reference = pmap_reference,
	.pmapop_destroy = pmap_destroy,
	.pmapop_copy = pmap_copy,
	.pmapop_update = pmap_update,
	.pmapop_enter = pmap_enter,
	.pmapop_remove = pmap_remove,
	.pmapop_kenter_pa = pmap_kenter_pa,
	.pmapop_kremove = pmap_kremove,
	.pmapop_extract = pmap_extract,
	.pmapop_protect = pmap_protect,
	.pmapop_unwire = pmap_unwire,
	.pmapop_page_protect = pmap_page_protect,
	.pmapop_query_bit = pmap_query_bit,
	.pmapop_clear_bit = pmap_clear_bit,
	.pmapop_activate = pmap_activate,
	.pmapop_deactivate = pmap_deactivate,
	.pmapop_pinit = pmap_pinit,
	.pmapop_procwr = pmap_procwr,
#if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
	.pmapop_pte_print = pmap_pte_print,
	.pmapop_pteg_check = pmap_pteg_check,
	.pmapop_print_mmuregs = pmap_print_mmuregs,
	.pmapop_print_pte = pmap_print_pte,
	.pmapop_pteg_dist = pmap_pteg_dist,
#else
	.pmapop_pte_print = NULL,
	.pmapop_pteg_check = NULL,
	.pmapop_print_mmuregs = NULL,
	.pmapop_print_pte = NULL,
	.pmapop_pteg_dist = NULL,
#endif
#if defined(DEBUG) || defined(PMAPCHECK)
	.pmapop_pvo_verify = pmap_pvo_verify,
#else
	.pmapop_pvo_verify = NULL,
#endif
	.pmapop_steal_memory = pmap_steal_memory,
	.pmapop_bootstrap = pmap_bootstrap,
};
#endif /* PMAPNAME */

/*
 * The following structure is aligned to 32 bytes
 */
struct pvo_entry {
	LIST_ENTRY(pvo_entry) pvo_vlink;	/* Link to common virt page */
	TAILQ_ENTRY(pvo_entry) pvo_olink;	/* Link to overflow entry */
	struct pte pvo_pte;			/* Prebuilt PTE */
	pmap_t pvo_pmap;			/* ptr to owning pmap */
	vaddr_t pvo_vaddr;			/* VA of entry */
#define	PVO_PTEGIDX_MASK	0x0007		/* which PTEG slot */
#define	PVO_PTEGIDX_VALID	0x0008		/* slot is valid */
#define	PVO_WIRED		0x0010		/* PVO entry is wired */
#define	PVO_MANAGED		0x0020		/* PVO e. for managed page */
#define	PVO_EXECUTABLE		0x0040		/* PVO e. for executable page */
#define	PVO_WIRED_P(pvo)	((pvo)->pvo_vaddr & PVO_WIRED)
#define	PVO_MANAGED_P(pvo)	((pvo)->pvo_vaddr & PVO_MANAGED)
#define	PVO_EXECUTABLE_P(pvo)	((pvo)->pvo_vaddr & PVO_EXECUTABLE)
#define	PVO_ENTER_INSERT	0		/* PVO has been removed */
#define	PVO_SPILL_UNSET		1		/* PVO has been evicted */
#define	PVO_SPILL_SET		2		/* PVO has been spilled */
#define	PVO_SPILL_INSERT	3		/* PVO has been inserted */
#define	PVO_PMAP_PAGE_PROTECT	4		/* PVO has changed */
#define	PVO_PMAP_PROTECT	5		/* PVO has changed */
#define	PVO_REMOVE		6		/* PVO has been removed */
#define	PVO_WHERE_MASK		15
#define	PVO_WHERE_SHFT		8
} __attribute__ ((aligned (32)));
#define	PVO_VADDR(pvo)		((pvo)->pvo_vaddr & ~ADDR_POFF)
#define	PVO_PTEGIDX_GET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
#define	PVO_PTEGIDX_ISSET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
#define	PVO_PTEGIDX_CLR(pvo)	\
	((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
#define	PVO_PTEGIDX_SET(pvo,i)	\
	((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
#define	PVO_WHERE(pvo,w)	\
	((pvo)->pvo_vaddr &= ~(PVO_WHERE_MASK << PVO_WHERE_SHFT), \
	 (pvo)->pvo_vaddr |= ((PVO_ ## w) << PVO_WHERE_SHFT))
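/*
 * Illustrative note: pvo_vaddr does double duty.  Mappings are always page
 * aligned, so the low bits of the VA are known to be zero and are reused to
 * hold per-PVO state.  For example (made-up address), a wired kernel mapping
 * of 0xdeadb000 that sits in PTEG slot 5 would carry
 *
 *	pvo_vaddr = 0xdeadb000 | PVO_WIRED | PVO_PTEGIDX_VALID | 5
 *
 * PVO_VADDR() masks the flags back off, and PVO_WHERE() records (for
 * debugging) which code path last touched the entry.
 */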
TAILQ_HEAD(pvo_tqhead, pvo_entry);
struct pvo_tqhead *pmap_pvo_table;	/* pvo entries by ptegroup index */
static struct pvo_head pmap_pvo_kunmanaged = LIST_HEAD_INITIALIZER(pmap_pvo_kunmanaged);	/* list of unmanaged pages */
static struct pvo_head pmap_pvo_unmanaged = LIST_HEAD_INITIALIZER(pmap_pvo_unmanaged);	/* list of unmanaged pages */

struct pool pmap_pool;		/* pool for pmap structures */
struct pool pmap_upvo_pool;	/* pool for pvo entries for unmanaged pages */
struct pool pmap_mpvo_pool;	/* pool for pvo entries for managed pages */

/*
 * We keep a cache of unmanaged pages to be used for pvo entries for
 * unmanaged pages.
 */
struct pvo_page {
	SIMPLEQ_ENTRY(pvo_page) pvop_link;
};
SIMPLEQ_HEAD(pvop_head, pvo_page);
static struct pvop_head pmap_upvop_head = SIMPLEQ_HEAD_INITIALIZER(pmap_upvop_head);
static struct pvop_head pmap_mpvop_head = SIMPLEQ_HEAD_INITIALIZER(pmap_mpvop_head);
u_long pmap_upvop_free;
u_long pmap_upvop_maxfree;
u_long pmap_mpvop_free;
u_long pmap_mpvop_maxfree;

static void *pmap_pool_ualloc(struct pool *, int);
static void *pmap_pool_malloc(struct pool *, int);

static void pmap_pool_ufree(struct pool *, void *);
static void pmap_pool_mfree(struct pool *, void *);

static struct pool_allocator pmap_pool_mallocator = {
	.pa_alloc = pmap_pool_malloc,
	.pa_free = pmap_pool_mfree,
	.pa_pagesz = 0,
};

static struct pool_allocator pmap_pool_uallocator = {
	.pa_alloc = pmap_pool_ualloc,
	.pa_free = pmap_pool_ufree,
	.pa_pagesz = 0,
};

#if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
void pmap_pte_print(volatile struct pte *);
void pmap_pteg_check(void);
void pmap_pteg_dist(void);
void pmap_print_pte(pmap_t, vaddr_t);
void pmap_print_mmuregs(void);
#endif

#if defined(DEBUG) || defined(PMAPCHECK)
#ifdef PMAPCHECK
int pmapcheck = 1;
#else
int pmapcheck = 0;
#endif
void pmap_pvo_verify(void);
static void pmap_pvo_check(const struct pvo_entry *);
#define	PMAP_PVO_CHECK(pvo)			\
	do {					\
		if (pmapcheck)			\
			pmap_pvo_check(pvo);	\
	} while (0)
#else
#define	PMAP_PVO_CHECK(pvo)	do { } while (/*CONSTCOND*/0)
#endif
static int pmap_pte_insert(int, struct pte *);
static int pmap_pvo_enter(pmap_t, struct pool *, struct pvo_head *,
	vaddr_t, paddr_t, register_t, int);
static void pmap_pvo_remove(struct pvo_entry *, int, struct pvo_head *);
static void pmap_pvo_free(struct pvo_entry *);
static void pmap_pvo_free_list(struct pvo_head *);
static struct pvo_entry *pmap_pvo_find_va(pmap_t, vaddr_t, int *);
static volatile struct pte *pmap_pvo_to_pte(const struct pvo_entry *, int);
static struct pvo_entry *pmap_pvo_reclaim(struct pmap *);
static void pvo_set_exec(struct pvo_entry *);
static void pvo_clear_exec(struct pvo_entry *);

static void tlbia(void);

static void pmap_release(pmap_t);
static paddr_t pmap_boot_find_memory(psize_t, psize_t, int);

static uint32_t pmap_pvo_reclaim_nextidx;
#ifdef DEBUG
static int pmap_pvo_reclaim_debugctr;
#endif

#define	VSID_NBPW	(sizeof(uint32_t) * 8)
static uint32_t pmap_vsid_bitmap[NPMAPS / VSID_NBPW];

static int pmap_initialized;

#if defined(DEBUG) || defined(PMAPDEBUG)
#define	PMAPDEBUG_BOOT		0x0001
#define	PMAPDEBUG_PTE		0x0002
#define	PMAPDEBUG_EXEC		0x0008
#define	PMAPDEBUG_PVOENTER	0x0010
#define	PMAPDEBUG_PVOREMOVE	0x0020
#define	PMAPDEBUG_ACTIVATE	0x0100
#define	PMAPDEBUG_CREATE	0x0200
#define	PMAPDEBUG_ENTER		0x1000
#define	PMAPDEBUG_KENTER	0x2000
#define	PMAPDEBUG_KREMOVE	0x4000
#define	PMAPDEBUG_REMOVE	0x8000

unsigned int pmapdebug = 0;

# define DPRINTF(x)		printf x
# define DPRINTFN(n, x)		if (pmapdebug & PMAPDEBUG_ ## n) printf x
#else
# define DPRINTF(x)
# define DPRINTFN(n, x)
#endif


#ifdef PMAPCOUNTERS
/*
 * From pmap_subr.c
 */
extern struct evcnt pmap_evcnt_mappings;
extern struct evcnt pmap_evcnt_unmappings;

extern struct evcnt pmap_evcnt_kernel_mappings;
extern struct evcnt pmap_evcnt_kernel_unmappings;

extern struct evcnt pmap_evcnt_mappings_replaced;

extern struct evcnt pmap_evcnt_exec_mappings;
extern struct evcnt pmap_evcnt_exec_cached;

extern struct evcnt pmap_evcnt_exec_synced;
extern struct evcnt pmap_evcnt_exec_synced_clear_modify;
extern struct evcnt pmap_evcnt_exec_synced_pvo_remove;

extern struct evcnt pmap_evcnt_exec_uncached_page_protect;
extern struct evcnt pmap_evcnt_exec_uncached_clear_modify;
extern struct evcnt pmap_evcnt_exec_uncached_zero_page;
extern struct evcnt pmap_evcnt_exec_uncached_copy_page;
extern struct evcnt pmap_evcnt_exec_uncached_pvo_remove;

extern struct evcnt pmap_evcnt_updates;
extern struct evcnt pmap_evcnt_collects;
extern struct evcnt pmap_evcnt_copies;

extern struct evcnt pmap_evcnt_ptes_spilled;
extern struct evcnt pmap_evcnt_ptes_unspilled;
extern struct evcnt pmap_evcnt_ptes_evicted;

extern struct evcnt pmap_evcnt_ptes_primary[8];
extern struct evcnt pmap_evcnt_ptes_secondary[8];
extern struct evcnt pmap_evcnt_ptes_removed;
extern struct evcnt pmap_evcnt_ptes_changed;
extern struct evcnt pmap_evcnt_pvos_reclaimed;
extern struct evcnt pmap_evcnt_pvos_failed;

extern struct evcnt pmap_evcnt_zeroed_pages;
extern struct evcnt pmap_evcnt_copied_pages;
extern struct evcnt pmap_evcnt_idlezeroed_pages;

#define	PMAPCOUNT(ev)	((pmap_evcnt_ ## ev).ev_count++)
#define	PMAPCOUNT2(ev)	((ev).ev_count++)
#else
#define	PMAPCOUNT(ev)	((void) 0)
#define	PMAPCOUNT2(ev)	((void) 0)
#endif

#define	TLBIE(va)	__asm volatile("tlbie %0" :: "r"(va))

/* XXXSL: this needs to be moved to assembler */
#define	TLBIEL(va)	__asm __volatile("tlbie %0" :: "r"(va))

#define	TLBSYNC()	__asm volatile("tlbsync")
#define	SYNC()		__asm volatile("sync")
#define	EIEIO()		__asm volatile("eieio")
#define	DCBST(va)	__asm __volatile("dcbst 0,%0" :: "r"(va))
#define	MFMSR()		mfmsr()
#define	MTMSR(psl)	mtmsr(psl)
#define	MFPVR()		mfpvr()
#define	MFSRIN(va)	mfsrin(va)
#define	MFTB()		mfrtcltbl()

#if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
static inline register_t
mfsrin(vaddr_t va)
{
	register_t sr;
	__asm volatile ("mfsrin %0,%1" : "=r"(sr) : "r"(va));
	return sr;
}
#endif	/* PMAP_OEA */

#if defined (PMAP_OEA64_BRIDGE)
extern void mfmsr64 (register64_t *result);
#endif /* PMAP_OEA64_BRIDGE */

#define	PMAP_LOCK()		KERNEL_LOCK(1, NULL)
#define	PMAP_UNLOCK()		KERNEL_UNLOCK_ONE(NULL)

static inline register_t
pmap_interrupts_off(void)
{
	register_t msr = MFMSR();
	if (msr & PSL_EE)
		MTMSR(msr & ~PSL_EE);
	return msr;
}

static void
pmap_interrupts_restore(register_t msr)
{
	if (msr & PSL_EE)
		MTMSR(msr);
}

static inline u_int32_t
mfrtcltbl(void)
{
#ifdef PPC_OEA601
	if ((MFPVR() >> 16) == MPC601)
		return (mfrtcl() >> 7);
	else
#endif
		return (mftbl());
}

/*
 * These small routines may have to be replaced,
 * if/when we support processors other than the 604.
 */

void
tlbia(void)
{
	char *i;

	SYNC();
#if defined(PMAP_OEA)
	/*
	 * Why not use "tlbia"?  Because not all processors implement it.
	 *
	 * This needs to be a per-CPU callback to do the appropriate thing
	 * for the CPU. XXX
	 */
	for (i = 0; i < (char *)0x00040000; i += 0x00001000) {
		TLBIE(i);
		EIEIO();
		SYNC();
	}
#elif defined (PMAP_OEA64) || defined (PMAP_OEA64_BRIDGE)
	/* This is specifically for the 970, 970UM v1.6 pp. 140. */
	for (i = 0; i <= (char *)0xFF000; i += 0x00001000) {
		TLBIEL(i);
		EIEIO();
		SYNC();
	}
#endif
	TLBSYNC();
	SYNC();
}

static inline register_t
va_to_vsid(const struct pmap *pm, vaddr_t addr)
{
#if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
	return (pm->pm_sr[addr >> ADDR_SR_SHFT] & SR_VSID) >> SR_VSID_SHFT;
#else /* PMAP_OEA64 */
#if 0
	const struct ste *ste;
	register_t hash;
	int i;

	hash = (addr >> ADDR_ESID_SHFT) & ADDR_ESID_HASH;

	/*
	 * Try the primary group first
	 */
	ste = pm->pm_stes[hash].stes;
	for (i = 0; i < 8; i++, ste++) {
		if (ste->ste_hi & STE_V) &&
		   (addr & ~(ADDR_POFF|ADDR_PIDX)) == (ste->ste_hi & STE_ESID))
			return ste;
	}

	/*
	 * Then the secondary group.
	 */
	ste = pm->pm_stes[hash ^ ADDR_ESID_HASH].stes;
	for (i = 0; i < 8; i++, ste++) {
		if (ste->ste_hi & STE_V) &&
		   (addr & ~(ADDR_POFF|ADDR_PIDX)) == (ste->ste_hi & STE_ESID))
			return addr;
	}

	return NULL;
#else
	/*
	 * Rather than searching the STE groups for the VSID, we know
	 * how we generate that from the ESID and so do that.
	 */
	return VSID_MAKE(addr >> ADDR_SR_SHFT, pm->pm_vsid) >> SR_VSID_SHFT;
#endif
#endif /* PMAP_OEA */
}

static inline register_t
va_to_pteg(const struct pmap *pm, vaddr_t addr)
{
	register_t hash;

	hash = va_to_vsid(pm, addr) ^ ((addr & ADDR_PIDX) >> ADDR_PIDX_SHFT);
	return hash & pmap_pteg_mask;
}

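/*
 * Worked example for va_to_pteg() (illustrative numbers): with
 * pmap_pteg_mask == 0x3ff, a segment VSID of 0x123 and va == 0x00345678,
 * the 16-bit page index is (va & ADDR_PIDX) >> ADDR_PIDX_SHFT == 0x345, so
 *
 *	hash = 0x123 ^ 0x345 = 0x266,  primary PTEG = 0x266 & 0x3ff = 0x266
 *
 * and the secondary PTEG for the same VA is 0x266 ^ 0x3ff.
 */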
#if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
/*
 * Given a PTE in the page table, calculate the VADDR that hashes to it.
 * The only bit of magic is that the top 4 bits of the address don't
 * technically exist in the PTE.  But we know we reserved 4 bits of the
 * VSID for it so that's how we get it.
 */
static vaddr_t
pmap_pte_to_va(volatile const struct pte *pt)
{
	vaddr_t va;
	uintptr_t ptaddr = (uintptr_t) pt;

	if (pt->pte_hi & PTE_HID)
		ptaddr ^= (pmap_pteg_mask * sizeof(struct pteg));

	/* PPC Bits 10-19  PPC64 Bits 42-51 */
#if defined(PMAP_OEA)
	va = ((pt->pte_hi >> PTE_VSID_SHFT) ^ (ptaddr / sizeof(struct pteg))) & 0x3ff;
#elif defined (PMAP_OEA64) || defined (PMAP_OEA64_BRIDGE)
	va = ((pt->pte_hi >> PTE_VSID_SHFT) ^ (ptaddr / sizeof(struct pteg))) & 0x7ff;
#endif
	va <<= ADDR_PIDX_SHFT;

	/* PPC Bits 4-9  PPC64 Bits 36-41 */
	va |= (pt->pte_hi & PTE_API) << ADDR_API_SHFT;

#if defined(PMAP_OEA64)
	/* PPC63 Bits 0-35 */
	/* va |= VSID_TO_SR(pt->pte_hi >> PTE_VSID_SHFT) << ADDR_SR_SHFT; */
#elif defined(PMAP_OEA) || defined(PMAP_OEA64_BRIDGE)
	/* PPC Bits 0-3 */
	va |= VSID_TO_SR(pt->pte_hi >> PTE_VSID_SHFT) << ADDR_SR_SHFT;
#endif

	return va;
}
#endif

static inline struct pvo_head *
pa_to_pvoh(paddr_t pa, struct vm_page **pg_p)
{
	struct vm_page *pg;
	struct vm_page_md *md;

	pg = PHYS_TO_VM_PAGE(pa);
	if (pg_p != NULL)
		*pg_p = pg;
	if (pg == NULL)
		return &pmap_pvo_unmanaged;
	md = VM_PAGE_TO_MD(pg);
	return &md->mdpg_pvoh;
}

static inline struct pvo_head *
vm_page_to_pvoh(struct vm_page *pg)
{
	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);

	return &md->mdpg_pvoh;
}


static inline void
pmap_attr_clear(struct vm_page *pg, int ptebit)
{
	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);

	md->mdpg_attrs &= ~ptebit;
}

static inline int
pmap_attr_fetch(struct vm_page *pg)
{
	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);

	return md->mdpg_attrs;
}

static inline void
pmap_attr_save(struct vm_page *pg, int ptebit)
{
	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);

	md->mdpg_attrs |= ptebit;
}

static inline int
pmap_pte_compare(const volatile struct pte *pt, const struct pte *pvo_pt)
{
	if (pt->pte_hi == pvo_pt->pte_hi
#if 0
	    && ((pt->pte_lo ^ pvo_pt->pte_lo) &
	        ~(PTE_REF|PTE_CHG)) == 0
#endif
	    )
		return 1;
	return 0;
}

static inline void
pmap_pte_create(struct pte *pt, const struct pmap *pm, vaddr_t va, register_t pte_lo)
{
	/*
	 * Construct the PTE.  Default to IMB initially.  Valid bit
	 * only gets set when the real pte is set in memory.
	 *
	 * Note: Don't set the valid bit for correct operation of tlb update.
	 */
#if defined(PMAP_OEA)
	pt->pte_hi = (va_to_vsid(pm, va) << PTE_VSID_SHFT)
	    | (((va & ADDR_PIDX) >> (ADDR_API_SHFT - PTE_API_SHFT)) & PTE_API);
	pt->pte_lo = pte_lo;
#elif defined (PMAP_OEA64_BRIDGE)
	pt->pte_hi = ((u_int64_t)va_to_vsid(pm, va) << PTE_VSID_SHFT)
	    | (((va & ADDR_PIDX) >> (ADDR_API_SHFT - PTE_API_SHFT)) & PTE_API);
	pt->pte_lo = (u_int64_t) pte_lo;
#elif defined (PMAP_OEA64)
#error PMAP_OEA64 not supported
#endif /* PMAP_OEA */
}

static inline void
pmap_pte_synch(volatile struct pte *pt, struct pte *pvo_pt)
{
	pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF|PTE_CHG);
}

static inline void
pmap_pte_clear(volatile struct pte *pt, vaddr_t va, int ptebit)
{
	/*
	 * As shown in Section 7.6.3.2.3
	 */
	pt->pte_lo &= ~ptebit;
	TLBIE(va);
	SYNC();
	EIEIO();
	TLBSYNC();
	SYNC();
#ifdef MULTIPROCESSOR
	DCBST(pt);
#endif
}

static inline void
pmap_pte_set(volatile struct pte *pt, struct pte *pvo_pt)
{
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
	if (pvo_pt->pte_hi & PTE_VALID)
		panic("pte_set: setting an already valid pte %p", pvo_pt);
#endif
	pvo_pt->pte_hi |= PTE_VALID;

	/*
	 * Update the PTE as defined in section 7.6.3.1
	 * Note that the REF/CHG bits are from pvo_pt and thus should
	 * have been saved so this routine can restore them (if desired).
	 */
	pt->pte_lo = pvo_pt->pte_lo;
	EIEIO();
	pt->pte_hi = pvo_pt->pte_hi;
	TLBSYNC();
	SYNC();
#ifdef MULTIPROCESSOR
	DCBST(pt);
#endif
	pmap_pte_valid++;
}

static inline void
pmap_pte_unset(volatile struct pte *pt, struct pte *pvo_pt, vaddr_t va)
{
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
	if ((pvo_pt->pte_hi & PTE_VALID) == 0)
		panic("pte_unset: attempt to unset an inactive pte#1 %p/%p", pvo_pt, pt);
	if ((pt->pte_hi & PTE_VALID) == 0)
		panic("pte_unset: attempt to unset an inactive pte#2 %p/%p", pvo_pt, pt);
#endif

	pvo_pt->pte_hi &= ~PTE_VALID;
	/*
	 * Force the ref & chg bits back into the PTEs.
	 */
	SYNC();
	/*
	 * Invalidate the pte ... (Section 7.6.3.3)
	 */
	pt->pte_hi &= ~PTE_VALID;
	SYNC();
	TLBIE(va);
	SYNC();
	EIEIO();
	TLBSYNC();
	SYNC();
	/*
	 * Save the ref & chg bits ...
	 */
	pmap_pte_synch(pt, pvo_pt);
	pmap_pte_valid--;
}

static inline void
pmap_pte_change(volatile struct pte *pt, struct pte *pvo_pt, vaddr_t va)
{
	/*
	 * Invalidate the PTE
	 */
	pmap_pte_unset(pt, pvo_pt, va);
	pmap_pte_set(pt, pvo_pt);
}

/*
 * Try to insert the PTE @ *pvo_pt into the pmap_pteg_table at ptegidx
 * (either primary or secondary location).
 *
 * Note: both the destination and source PTEs must not have PTE_VALID set.
 */

static int
pmap_pte_insert(int ptegidx, struct pte *pvo_pt)
{
	volatile struct pte *pt;
	int i;

#if defined(DEBUG)
	DPRINTFN(PTE, ("pmap_pte_insert: idx %#x, pte %#" _PRIxpte " %#" _PRIxpte "\n",
	    ptegidx, pvo_pt->pte_hi, pvo_pt->pte_lo));
#endif
	/*
	 * First try primary hash.
	 */
	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & PTE_VALID) == 0) {
			pvo_pt->pte_hi &= ~PTE_HID;
			pmap_pte_set(pt, pvo_pt);
			return i;
		}
	}

	/*
	 * Now try secondary hash.
	 */
	ptegidx ^= pmap_pteg_mask;
	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & PTE_VALID) == 0) {
			pvo_pt->pte_hi |= PTE_HID;
			pmap_pte_set(pt, pvo_pt);
			return i;
		}
	}
	return -1;
}

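/*
 * Illustrative note: the secondary PTEG is the one's-complement hash, so
 * "ptegidx ^ pmap_pteg_mask" flips between the two groups for a given VA,
 * and PTE_HID in pte_hi records which hash a resident PTE was inserted
 * under.  For example, with pmap_pteg_mask == 0x3ff, a mapping that hashes
 * to PTEG 0x266 may instead be placed in PTEG 0x199.
 */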
/*
 * Spill handler.
 *
 * Tries to spill a page table entry from the overflow area.
 * This runs in either real mode (if dealing with an exception spill)
 * or virtual mode when dealing with manually spilling one of the
 * kernel's pte entries.  In either case, interrupts are already
 * disabled.
 */

int
pmap_pte_spill(struct pmap *pm, vaddr_t addr, bool exec)
{
	struct pvo_entry *source_pvo, *victim_pvo, *next_pvo;
	struct pvo_entry *pvo;
	/* XXX: gcc -- vpvoh is always set at either *1* or *2* */
	struct pvo_tqhead *pvoh, *vpvoh = NULL;
	int ptegidx, i, j;
	volatile struct pteg *pteg;
	volatile struct pte *pt;

	PMAP_LOCK();

	ptegidx = va_to_pteg(pm, addr);

	/*
	 * Have to substitute some entry.  Use the primary hash for this.
	 * Use low bits of timebase as random generator.  Make sure we are
	 * not picking a kernel pte for replacement.
	 */
	pteg = &pmap_pteg_table[ptegidx];
	i = MFTB() & 7;
	for (j = 0; j < 8; j++) {
		pt = &pteg->pt[i];
		if ((pt->pte_hi & PTE_VALID) == 0)
			break;
		if (VSID_TO_HASH((pt->pte_hi & PTE_VSID) >> PTE_VSID_SHFT)
		    < PHYSMAP_VSIDBITS)
			break;
		i = (i + 1) & 7;
	}
	KASSERT(j < 8);

	source_pvo = NULL;
	victim_pvo = NULL;
	pvoh = &pmap_pvo_table[ptegidx];
	TAILQ_FOREACH(pvo, pvoh, pvo_olink) {

		/*
		 * We need to find pvo entry for this address...
		 */
		PMAP_PVO_CHECK(pvo);		/* sanity check */

		/*
		 * If we haven't found the source and we come to a PVO with
		 * a valid PTE, then we know we can't find it because
		 * evicted PVOs are always first in the list.
		 */
		if (source_pvo == NULL && (pvo->pvo_pte.pte_hi & PTE_VALID))
			break;
		if (source_pvo == NULL && pm == pvo->pvo_pmap &&
		    addr == PVO_VADDR(pvo)) {

			/*
			 * Now we have found the entry to be spilled into the
			 * pteg.  Attempt to insert it into the page table.
			 */
			j = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
			if (j >= 0) {
				PVO_PTEGIDX_SET(pvo, j);
				PMAP_PVO_CHECK(pvo);	/* sanity check */
				PVO_WHERE(pvo, SPILL_INSERT);
				pvo->pvo_pmap->pm_evictions--;
				PMAPCOUNT(ptes_spilled);
				PMAPCOUNT2(((pvo->pvo_pte.pte_hi & PTE_HID)
				    ? pmap_evcnt_ptes_secondary
				    : pmap_evcnt_ptes_primary)[j]);

				/*
				 * Since we keep the evicted entries at the
				 * front of the PVO list, we need to move this
				 * (now resident) PVO after the evicted
				 * entries.
				 */
				next_pvo = TAILQ_NEXT(pvo, pvo_olink);

				/*
				 * If we don't have to move (either we were the
				 * last entry or the next entry was valid),
				 * don't change our position.  Otherwise
				 * move ourselves to the tail of the queue.
				 */
				if (next_pvo != NULL &&
				    !(next_pvo->pvo_pte.pte_hi & PTE_VALID)) {
					TAILQ_REMOVE(pvoh, pvo, pvo_olink);
					TAILQ_INSERT_TAIL(pvoh, pvo, pvo_olink);
				}
				PMAP_UNLOCK();
				return 1;
			}
			source_pvo = pvo;
			if (exec && !PVO_EXECUTABLE_P(source_pvo)) {
				return 0;
			}
			if (victim_pvo != NULL)
				break;
		}

		/*
		 * We also need the pvo entry of the victim we are replacing
		 * so save the R & C bits of the PTE.
		 */
		if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL &&
		    pmap_pte_compare(pt, &pvo->pvo_pte)) {
			vpvoh = pvoh;			/* *1* */
			victim_pvo = pvo;
			if (source_pvo != NULL)
				break;
		}
	}

	if (source_pvo == NULL) {
		PMAPCOUNT(ptes_unspilled);
		PMAP_UNLOCK();
		return 0;
	}

	if (victim_pvo == NULL) {
		if ((pt->pte_hi & PTE_HID) == 0)
			panic("pmap_pte_spill: victim p-pte (%p) has "
			    "no pvo entry!", pt);

		/*
		 * If this is a secondary PTE, we need to search
		 * its primary pvo bucket for the matching PVO.
		 */
		vpvoh = &pmap_pvo_table[ptegidx ^ pmap_pteg_mask];	/* *2* */
		TAILQ_FOREACH(pvo, vpvoh, pvo_olink) {
			PMAP_PVO_CHECK(pvo);		/* sanity check */

			/*
			 * We also need the pvo entry of the victim we are
			 * replacing so save the R & C bits of the PTE.
			 */
			if (pmap_pte_compare(pt, &pvo->pvo_pte)) {
				victim_pvo = pvo;
				break;
			}
		}
		if (victim_pvo == NULL)
			panic("pmap_pte_spill: victim s-pte (%p) has "
			    "no pvo entry!", pt);
	}

	/*
	 * The victim should not be a kernel PVO/PTE entry.
	 */
	KASSERT(victim_pvo->pvo_pmap != pmap_kernel());
	KASSERT(PVO_PTEGIDX_ISSET(victim_pvo));
	KASSERT(PVO_PTEGIDX_GET(victim_pvo) == i);

	/*
	 * We are invalidating the TLB entry for the EA we are replacing
	 * even though it's valid; if we don't, we lose any ref/chg bit
	 * changes contained in the TLB entry.
	 */
	source_pvo->pvo_pte.pte_hi &= ~PTE_HID;

	/*
	 * To enforce the PVO list ordering constraint that all
	 * evicted entries should come before all valid entries,
	 * move the source PVO to the tail of its list and the
	 * victim PVO to the head of its list (which might not be
	 * the same list, if the victim was using the secondary hash).
	 */
	TAILQ_REMOVE(pvoh, source_pvo, pvo_olink);
	TAILQ_INSERT_TAIL(pvoh, source_pvo, pvo_olink);
	TAILQ_REMOVE(vpvoh, victim_pvo, pvo_olink);
	TAILQ_INSERT_HEAD(vpvoh, victim_pvo, pvo_olink);
	pmap_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr);
	pmap_pte_set(pt, &source_pvo->pvo_pte);
	victim_pvo->pvo_pmap->pm_evictions++;
	source_pvo->pvo_pmap->pm_evictions--;
	PVO_WHERE(victim_pvo, SPILL_UNSET);
	PVO_WHERE(source_pvo, SPILL_SET);

	PVO_PTEGIDX_CLR(victim_pvo);
	PVO_PTEGIDX_SET(source_pvo, i);
	PMAPCOUNT2(pmap_evcnt_ptes_primary[i]);
	PMAPCOUNT(ptes_spilled);
	PMAPCOUNT(ptes_evicted);
	PMAPCOUNT(ptes_removed);

	PMAP_PVO_CHECK(victim_pvo);
	PMAP_PVO_CHECK(source_pvo);

	PMAP_UNLOCK();
	return 1;
}

/*
 * Restrict given range to physical memory
 */
void
pmap_real_memory(paddr_t *start, psize_t *size)
{
	struct mem_region *mp;

	for (mp = mem; mp->size; mp++) {
		if (*start + *size > mp->start
		    && *start < mp->start + mp->size) {
			if (*start < mp->start) {
				*size -= mp->start - *start;
				*start = mp->start;
			}
			if (*start + *size > mp->start + mp->size)
				*size = mp->start + mp->size - *start;
			return;
		}
	}
	*size = 0;
}

/*
 * Initialize anything else for pmap handling.
 * Called during vm_init().
 */
void
pmap_init(void)
{
	pool_init(&pmap_mpvo_pool, sizeof(struct pvo_entry),
	    sizeof(struct pvo_entry), 0, 0, "pmap_mpvopl",
	    &pmap_pool_mallocator, IPL_NONE);

	pool_setlowat(&pmap_mpvo_pool, 1008);

	pmap_initialized = 1;

}

/*
 * How much virtual space does the kernel get?
 */
void
pmap_virtual_space(vaddr_t *start, vaddr_t *end)
{
	/*
	 * For now, reserve one segment (minus some overhead) for kernel
	 * virtual memory
	 */
	*start = VM_MIN_KERNEL_ADDRESS;
	*end = VM_MAX_KERNEL_ADDRESS;
}

/*
 * Allocate, initialize, and return a new physical map.
 */
pmap_t
pmap_create(void)
{
	pmap_t pm;

	pm = pool_get(&pmap_pool, PR_WAITOK);
	memset((void *)pm, 0, sizeof *pm);
	pmap_pinit(pm);

	DPRINTFN(CREATE,("pmap_create: pm %p:\n"
	    "\t%#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr
	    " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr "\n"
	    "\t%#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr
	    " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr "\n",
	    pm,
	    pm->pm_sr[0], pm->pm_sr[1],
	    pm->pm_sr[2], pm->pm_sr[3],
	    pm->pm_sr[4], pm->pm_sr[5],
	    pm->pm_sr[6], pm->pm_sr[7],
	    pm->pm_sr[8], pm->pm_sr[9],
	    pm->pm_sr[10], pm->pm_sr[11],
	    pm->pm_sr[12], pm->pm_sr[13],
	    pm->pm_sr[14], pm->pm_sr[15]));
	return pm;
}

/*
 * Initialize a preallocated and zeroed pmap structure.
 */
void
pmap_pinit(pmap_t pm)
{
	register_t entropy = MFTB();
	register_t mask;
	int i;

	/*
	 * Allocate some segment registers for this pmap.
	 */
	pm->pm_refs = 1;
	PMAP_LOCK();
	for (i = 0; i < NPMAPS; i += VSID_NBPW) {
		static register_t pmap_vsidcontext;
		register_t hash;
		unsigned int n;

		/*
		 * Create a new value by multiplying by a prime and adding in
		 * entropy from the timebase register.  This is to make the
		 * VSID more random so that the PT Hash function collides
		 * less often.  (note that the prime causes gcc to do shifts
		 * instead of a multiply)
		 */
		pmap_vsidcontext = (pmap_vsidcontext * 0x1105) + entropy;
		hash = pmap_vsidcontext & (NPMAPS - 1);
		if (hash == 0) {		/* 0 is special, avoid it */
			entropy += 0xbadf00d;
			continue;
		}
		n = hash >> 5;
		mask = 1L << (hash & (VSID_NBPW-1));
		hash = pmap_vsidcontext;
		if (pmap_vsid_bitmap[n] & mask) {	/* collision? */
			/* anything free in this bucket? */
			if (~pmap_vsid_bitmap[n] == 0) {
				entropy = hash ^ (hash >> 16);
				continue;
			}
			i = ffs(~pmap_vsid_bitmap[n]) - 1;
			mask = 1L << i;
			hash &= ~(VSID_NBPW-1);
			hash |= i;
		}
		hash &= PTE_VSID >> PTE_VSID_SHFT;
		pmap_vsid_bitmap[n] |= mask;
		pm->pm_vsid = hash;
#if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
		for (i = 0; i < 16; i++)
			pm->pm_sr[i] = VSID_MAKE(i, hash) | SR_PRKEY |
			    SR_NOEXEC;
#endif
		PMAP_UNLOCK();
		return;
	}
	PMAP_UNLOCK();
	panic("pmap_pinit: out of segments");
}

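/*
 * Illustrative note on the VSID allocation above: pmap_vsid_bitmap tracks
 * NPMAPS possible VSID values, 32 per word.  A candidate hash of, say,
 * 0x4c7 lands in word 0x4c7 >> 5 == 0x26 at bit 0x4c7 & 31 == 7; if that
 * bit is already taken, the first clear bit in the same word is used
 * instead.  The chosen value then seeds all 16 segment registers via
 * VSID_MAKE(sr, hash), with SR_PRKEY and SR_NOEXEC set initially.
 */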
/*
 * Add a reference to the given pmap.
 */
void
pmap_reference(pmap_t pm)
{
	atomic_inc_uint(&pm->pm_refs);
}

/*
 * Retire the given pmap from service.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_destroy(pmap_t pm)
{
	if (atomic_dec_uint_nv(&pm->pm_refs) == 0) {
		pmap_release(pm);
		pool_put(&pmap_pool, pm);
	}
}

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 */
void
pmap_release(pmap_t pm)
{
	int idx, mask;

	KASSERT(pm->pm_stats.resident_count == 0);
	KASSERT(pm->pm_stats.wired_count == 0);

	PMAP_LOCK();
	if (pm->pm_sr[0] == 0)
		panic("pmap_release");
	idx = pm->pm_vsid & (NPMAPS-1);
	mask = 1 << (idx % VSID_NBPW);
	idx /= VSID_NBPW;

	KASSERT(pmap_vsid_bitmap[idx] & mask);
	pmap_vsid_bitmap[idx] &= ~mask;
	PMAP_UNLOCK();
}

/*
 * Copy the range specified by src_addr/len
 * from the source map to the range dst_addr/len
 * in the destination map.
 *
 * This routine is only advisory and need not do anything.
 */
void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr,
	vsize_t len, vaddr_t src_addr)
{
	PMAPCOUNT(copies);
}

/*
 * Require that all active physical maps contain no
 * incorrect entries NOW.
 */
void
pmap_update(struct pmap *pmap)
{
	PMAPCOUNT(updates);
	TLBSYNC();
}

static inline int
pmap_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
{
	int pteidx;
	/*
	 * We can find the actual pte entry without searching by
	 * grabbing the PTEG index from 3 unused bits in pte_lo[11:9]
	 * and by noticing the HID bit.
	 */
	pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo);
	if (pvo->pvo_pte.pte_hi & PTE_HID)
		pteidx ^= pmap_pteg_mask * 8;
	return pteidx;
}

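/*
 * Illustrative note: a pteidx is just "PTEG index * 8 + slot".  With
 * pmap_pteg_mask == 0x3ff, a PVO sitting in slot 3 of PTEG 0x266 under the
 * primary hash yields pteidx 0x1333; if PTE_HID is set, the same arithmetic
 * is applied to the complementary PTEG (0x266 ^ 0x3ff).
 */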
volatile struct pte *
pmap_pvo_to_pte(const struct pvo_entry *pvo, int pteidx)
{
	volatile struct pte *pt;

#if !defined(DIAGNOSTIC) && !defined(DEBUG) && !defined(PMAPCHECK)
	if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0)
		return NULL;
#endif

	/*
	 * If we haven't been supplied the ptegidx, calculate it.
	 */
	if (pteidx == -1) {
		int ptegidx;
		ptegidx = va_to_pteg(pvo->pvo_pmap, pvo->pvo_vaddr);
		pteidx = pmap_pvo_pte_index(pvo, ptegidx);
	}

	pt = &pmap_pteg_table[pteidx >> 3].pt[pteidx & 7];

#if !defined(DIAGNOSTIC) && !defined(DEBUG) && !defined(PMAPCHECK)
	return pt;
#else
	if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) {
		panic("pmap_pvo_to_pte: pvo %p: has valid pte in "
		    "pvo but no valid pte index", pvo);
	}
	if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) {
		panic("pmap_pvo_to_pte: pvo %p: has valid pte index in "
		    "pvo but no valid pte", pvo);
	}

	if ((pt->pte_hi ^ (pvo->pvo_pte.pte_hi & ~PTE_VALID)) == PTE_VALID) {
		if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) {
#if defined(DEBUG) || defined(PMAPCHECK)
			pmap_pte_print(pt);
#endif
			panic("pmap_pvo_to_pte: pvo %p: has valid pte in "
			    "pmap_pteg_table %p but invalid in pvo",
			    pvo, pt);
		}
		if (((pt->pte_lo ^ pvo->pvo_pte.pte_lo) & ~(PTE_CHG|PTE_REF)) != 0) {
#if defined(DEBUG) || defined(PMAPCHECK)
			pmap_pte_print(pt);
#endif
			panic("pmap_pvo_to_pte: pvo %p: pvo pte does "
			    "not match pte %p in pmap_pteg_table",
			    pvo, pt);
		}
		return pt;
	}

	if (pvo->pvo_pte.pte_hi & PTE_VALID) {
#if defined(DEBUG) || defined(PMAPCHECK)
		pmap_pte_print(pt);
#endif
		panic("pmap_pvo_to_pte: pvo %p: has no matching pte %p in "
		    "pmap_pteg_table but valid in pvo", pvo, pt);
	}
	return NULL;
#endif	/* !(!DIAGNOSTIC && !DEBUG && !PMAPCHECK) */
}

struct pvo_entry *
pmap_pvo_find_va(pmap_t pm, vaddr_t va, int *pteidx_p)
{
	struct pvo_entry *pvo;
	int ptegidx;

	va &= ~ADDR_POFF;
	ptegidx = va_to_pteg(pm, va);

	TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
		if ((uintptr_t) pvo >= SEGMENT_LENGTH)
			panic("pmap_pvo_find_va: invalid pvo %p on "
			    "list %#x (%p)", pvo, ptegidx,
			    &pmap_pvo_table[ptegidx]);
#endif
		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
			if (pteidx_p)
				*pteidx_p = pmap_pvo_pte_index(pvo, ptegidx);
			return pvo;
		}
	}
	if ((pm == pmap_kernel()) && (va < SEGMENT_LENGTH))
		panic("%s: returning NULL for %s pmap, va: %#" _PRIxva "\n",
"kernel" : "user"), va); 1424 return NULL; 1425 } 1426 1427 #if defined(DEBUG) || defined(PMAPCHECK) 1428 void 1429 pmap_pvo_check(const struct pvo_entry *pvo) 1430 { 1431 struct pvo_head *pvo_head; 1432 struct pvo_entry *pvo0; 1433 volatile struct pte *pt; 1434 int failed = 0; 1435 1436 PMAP_LOCK(); 1437 1438 if ((uintptr_t)(pvo+1) >= SEGMENT_LENGTH) 1439 panic("pmap_pvo_check: pvo %p: invalid address", pvo); 1440 1441 if ((uintptr_t)(pvo->pvo_pmap+1) >= SEGMENT_LENGTH) { 1442 printf("pmap_pvo_check: pvo %p: invalid pmap address %p\n", 1443 pvo, pvo->pvo_pmap); 1444 failed = 1; 1445 } 1446 1447 if ((uintptr_t)TAILQ_NEXT(pvo, pvo_olink) >= SEGMENT_LENGTH || 1448 (((uintptr_t)TAILQ_NEXT(pvo, pvo_olink)) & 0x1f) != 0) { 1449 printf("pmap_pvo_check: pvo %p: invalid ovlink address %p\n", 1450 pvo, TAILQ_NEXT(pvo, pvo_olink)); 1451 failed = 1; 1452 } 1453 1454 if ((uintptr_t)LIST_NEXT(pvo, pvo_vlink) >= SEGMENT_LENGTH || 1455 (((uintptr_t)LIST_NEXT(pvo, pvo_vlink)) & 0x1f) != 0) { 1456 printf("pmap_pvo_check: pvo %p: invalid ovlink address %p\n", 1457 pvo, LIST_NEXT(pvo, pvo_vlink)); 1458 failed = 1; 1459 } 1460 1461 if (PVO_MANAGED_P(pvo)) { 1462 pvo_head = pa_to_pvoh(pvo->pvo_pte.pte_lo & PTE_RPGN, NULL); 1463 } else { 1464 if (pvo->pvo_vaddr < VM_MIN_KERNEL_ADDRESS) { 1465 printf("pmap_pvo_check: pvo %p: non kernel address " 1466 "on kernel unmanaged list\n", pvo); 1467 failed = 1; 1468 } 1469 pvo_head = &pmap_pvo_kunmanaged; 1470 } 1471 LIST_FOREACH(pvo0, pvo_head, pvo_vlink) { 1472 if (pvo0 == pvo) 1473 break; 1474 } 1475 if (pvo0 == NULL) { 1476 printf("pmap_pvo_check: pvo %p: not present " 1477 "on its vlist head %p\n", pvo, pvo_head); 1478 failed = 1; 1479 } 1480 if (pvo != pmap_pvo_find_va(pvo->pvo_pmap, pvo->pvo_vaddr, NULL)) { 1481 printf("pmap_pvo_check: pvo %p: not present " 1482 "on its olist head\n", pvo); 1483 failed = 1; 1484 } 1485 pt = pmap_pvo_to_pte(pvo, -1); 1486 if (pt == NULL) { 1487 if (pvo->pvo_pte.pte_hi & PTE_VALID) { 1488 printf("pmap_pvo_check: pvo %p: pte_hi VALID but " 1489 "no PTE\n", pvo); 1490 failed = 1; 1491 } 1492 } else { 1493 if ((uintptr_t) pt < (uintptr_t) &pmap_pteg_table[0] || 1494 (uintptr_t) pt >= 1495 (uintptr_t) &pmap_pteg_table[pmap_pteg_cnt]) { 1496 printf("pmap_pvo_check: pvo %p: pte %p not in " 1497 "pteg table\n", pvo, pt); 1498 failed = 1; 1499 } 1500 if (((((uintptr_t) pt) >> 3) & 7) != PVO_PTEGIDX_GET(pvo)) { 1501 printf("pmap_pvo_check: pvo %p: pte_hi VALID but " 1502 "no PTE\n", pvo); 1503 failed = 1; 1504 } 1505 if (pvo->pvo_pte.pte_hi != pt->pte_hi) { 1506 printf("pmap_pvo_check: pvo %p: pte_hi differ: " 1507 "%#" _PRIxpte "/%#" _PRIxpte "\n", pvo, 1508 pvo->pvo_pte.pte_hi, 1509 pt->pte_hi); 1510 failed = 1; 1511 } 1512 if (((pvo->pvo_pte.pte_lo ^ pt->pte_lo) & 1513 (PTE_PP|PTE_WIMG|PTE_RPGN)) != 0) { 1514 printf("pmap_pvo_check: pvo %p: pte_lo differ: " 1515 "%#" _PRIxpte "/%#" _PRIxpte "\n", pvo, 1516 (pvo->pvo_pte.pte_lo & (PTE_PP|PTE_WIMG|PTE_RPGN)), 1517 (pt->pte_lo & (PTE_PP|PTE_WIMG|PTE_RPGN))); 1518 failed = 1; 1519 } 1520 if ((pmap_pte_to_va(pt) ^ PVO_VADDR(pvo)) & 0x0fffffff) { 1521 printf("pmap_pvo_check: pvo %p: PTE %p derived VA %#" _PRIxva "" 1522 " doesn't not match PVO's VA %#" _PRIxva "\n", 1523 pvo, pt, pmap_pte_to_va(pt), PVO_VADDR(pvo)); 1524 failed = 1; 1525 } 1526 if (failed) 1527 pmap_pte_print(pt); 1528 } 1529 if (failed) 1530 panic("pmap_pvo_check: pvo %p, pm %p: bugcheck!", pvo, 1531 pvo->pvo_pmap); 1532 1533 PMAP_UNLOCK(); 1534 } 1535 #endif /* DEBUG || PMAPCHECK */ 1536 1537 /* 1538 * Search the PVO table 
 * Search the PVO table looking for a non-wired entry.
 * If we find one, remove it and return it.
 */

struct pvo_entry *
pmap_pvo_reclaim(struct pmap *pm)
{
	struct pvo_tqhead *pvoh;
	struct pvo_entry *pvo;
	uint32_t idx, endidx;

	endidx = pmap_pvo_reclaim_nextidx;
	for (idx = (endidx + 1) & pmap_pteg_mask; idx != endidx;
	    idx = (idx + 1) & pmap_pteg_mask) {
		pvoh = &pmap_pvo_table[idx];
		TAILQ_FOREACH(pvo, pvoh, pvo_olink) {
			if (!PVO_WIRED_P(pvo)) {
				pmap_pvo_remove(pvo, -1, NULL);
				pmap_pvo_reclaim_nextidx = idx;
				PMAPCOUNT(pvos_reclaimed);
				return pvo;
			}
		}
	}
	return NULL;
}

/*
 * This returns whether this is the first mapping of a page.
 */
int
pmap_pvo_enter(pmap_t pm, struct pool *pl, struct pvo_head *pvo_head,
	vaddr_t va, paddr_t pa, register_t pte_lo, int flags)
{
	struct pvo_entry *pvo;
	struct pvo_tqhead *pvoh;
	register_t msr;
	int ptegidx;
	int i;
	int poolflags = PR_NOWAIT;

	/*
	 * Compute the PTE Group index.
	 */
	va &= ~ADDR_POFF;
	ptegidx = va_to_pteg(pm, va);

	msr = pmap_interrupts_off();

#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
	if (pmap_pvo_remove_depth > 0)
		panic("pmap_pvo_enter: called while pmap_pvo_remove active!");
	if (++pmap_pvo_enter_depth > 1)
		panic("pmap_pvo_enter: called recursively!");
#endif

	/*
	 * Remove any existing mapping for this page.  Reuse the
	 * pvo entry if there is a mapping.
	 */
	TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
#ifdef DEBUG
			if ((pmapdebug & PMAPDEBUG_PVOENTER) &&
			    ((pvo->pvo_pte.pte_lo ^ (pa|pte_lo)) &
			    ~(PTE_REF|PTE_CHG)) == 0 &&
			    va < VM_MIN_KERNEL_ADDRESS) {
				printf("pmap_pvo_enter: pvo %p: dup %#" _PRIxpte "/%#" _PRIxpa "\n",
				    pvo, pvo->pvo_pte.pte_lo, pte_lo|pa);
				printf("pmap_pvo_enter: pte_hi=%#" _PRIxpte " sr=%#" _PRIsr "\n",
				    pvo->pvo_pte.pte_hi,
				    pm->pm_sr[va >> ADDR_SR_SHFT]);
				pmap_pte_print(pmap_pvo_to_pte(pvo, -1));
#ifdef DDBX
				Debugger();
#endif
			}
#endif
			PMAPCOUNT(mappings_replaced);
			pmap_pvo_remove(pvo, -1, NULL);
			break;
		}
	}

	/*
	 * If we aren't overwriting a mapping, try to allocate.
	 */
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
	--pmap_pvo_enter_depth;
#endif
	pmap_interrupts_restore(msr);
	if (pvo) {
		pmap_pvo_free(pvo);
	}
	pvo = pool_get(pl, poolflags);

#ifdef DEBUG
	/*
	 * Exercise pmap_pvo_reclaim() a little.
	 */
	if (pvo && (flags & PMAP_CANFAIL) != 0 &&
	    pmap_pvo_reclaim_debugctr++ > 0x1000 &&
	    (pmap_pvo_reclaim_debugctr & 0xff) == 0) {
		pool_put(pl, pvo);
		pvo = NULL;
	}
#endif

	msr = pmap_interrupts_off();
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
	++pmap_pvo_enter_depth;
#endif
	if (pvo == NULL) {
		pvo = pmap_pvo_reclaim(pm);
		if (pvo == NULL) {
			if ((flags & PMAP_CANFAIL) == 0)
				panic("pmap_pvo_enter: failed");
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
			pmap_pvo_enter_depth--;
#endif
			PMAPCOUNT(pvos_failed);
			pmap_interrupts_restore(msr);
			return ENOMEM;
		}
	}

	pvo->pvo_vaddr = va;
	pvo->pvo_pmap = pm;
	pvo->pvo_vaddr &= ~ADDR_POFF;
	if (flags & VM_PROT_EXECUTE) {
		PMAPCOUNT(exec_mappings);
		pvo_set_exec(pvo);
	}
	if (flags & PMAP_WIRED)
		pvo->pvo_vaddr |= PVO_WIRED;
	if (pvo_head != &pmap_pvo_kunmanaged) {
		pvo->pvo_vaddr |= PVO_MANAGED;
		PMAPCOUNT(mappings);
	} else {
		PMAPCOUNT(kernel_mappings);
	}
	pmap_pte_create(&pvo->pvo_pte, pm, va, pa | pte_lo);

	LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
	if (PVO_WIRED_P(pvo))
		pvo->pvo_pmap->pm_stats.wired_count++;
	pvo->pvo_pmap->pm_stats.resident_count++;
#if defined(DEBUG)
	/* if (pm != pmap_kernel() && va < VM_MIN_KERNEL_ADDRESS) */
	DPRINTFN(PVOENTER,
	    ("pmap_pvo_enter: pvo %p: pm %p va %#" _PRIxva " pa %#" _PRIxpa "\n",
	    pvo, pm, va, pa));
#endif

	/*
	 * We hope this succeeds but it isn't required.
	 */
	pvoh = &pmap_pvo_table[ptegidx];
	i = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
	if (i >= 0) {
		PVO_PTEGIDX_SET(pvo, i);
		PVO_WHERE(pvo, ENTER_INSERT);
		PMAPCOUNT2(((pvo->pvo_pte.pte_hi & PTE_HID)
		    ? pmap_evcnt_ptes_secondary : pmap_evcnt_ptes_primary)[i]);
		TAILQ_INSERT_TAIL(pvoh, pvo, pvo_olink);

	} else {
		/*
		 * Since we didn't have room for this entry (which makes it
		 * an evicted entry), place it at the head of the list.
		 */
		TAILQ_INSERT_HEAD(pvoh, pvo, pvo_olink);
		PMAPCOUNT(ptes_evicted);
		pm->pm_evictions++;
		/*
		 * If this is a kernel page, make sure it's active.
		 */
		if (pm == pmap_kernel()) {
			i = pmap_pte_spill(pm, va, false);
			KASSERT(i);
		}
	}
	PMAP_PVO_CHECK(pvo);		/* sanity check */
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
	pmap_pvo_enter_depth--;
#endif
	pmap_interrupts_restore(msr);
	return 0;
}

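/*
 * Illustrative note: pmap_pvo_enter() maintains the invariant that
 * pmap_pte_spill() relies on: within each pmap_pvo_table[] bucket, PVOs
 * whose PTE is not resident ("evicted") sit at the head of the TAILQ and
 * resident ones at the tail.  The pool allocation above is done with
 * interrupts re-enabled and PR_NOWAIT, falling back to reclaiming an
 * unwired PVO when the pool is empty.
 */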
static void
pmap_pvo_remove(struct pvo_entry *pvo, int pteidx, struct pvo_head *pvol)
{
	volatile struct pte *pt;
	int ptegidx;

#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
	if (++pmap_pvo_remove_depth > 1)
		panic("pmap_pvo_remove: called recursively!");
#endif

	/*
	 * If we haven't been supplied the ptegidx, calculate it.
	 */
	if (pteidx == -1) {
		ptegidx = va_to_pteg(pvo->pvo_pmap, pvo->pvo_vaddr);
		pteidx = pmap_pvo_pte_index(pvo, ptegidx);
	} else {
		ptegidx = pteidx >> 3;
		if (pvo->pvo_pte.pte_hi & PTE_HID)
			ptegidx ^= pmap_pteg_mask;
	}
	PMAP_PVO_CHECK(pvo);		/* sanity check */

	/*
	 * If there is an active pte entry, we need to deactivate it
	 * (and save the ref & chg bits).
	 */
	pt = pmap_pvo_to_pte(pvo, pteidx);
	if (pt != NULL) {
		pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
		PVO_WHERE(pvo, REMOVE);
		PVO_PTEGIDX_CLR(pvo);
		PMAPCOUNT(ptes_removed);
	} else {
		KASSERT(pvo->pvo_pmap->pm_evictions > 0);
		pvo->pvo_pmap->pm_evictions--;
	}

	/*
	 * Account for executable mappings.
	 */
	if (PVO_EXECUTABLE_P(pvo))
		pvo_clear_exec(pvo);

	/*
	 * Update our statistics.
	 */
	pvo->pvo_pmap->pm_stats.resident_count--;
	if (PVO_WIRED_P(pvo))
		pvo->pvo_pmap->pm_stats.wired_count--;

	/*
	 * Save the REF/CHG bits into their cache if the page is managed.
	 */
	if (PVO_MANAGED_P(pvo)) {
		register_t ptelo = pvo->pvo_pte.pte_lo;
		struct vm_page *pg = PHYS_TO_VM_PAGE(ptelo & PTE_RPGN);

		if (pg != NULL) {
			/*
			 * If this page was changed and it is mapped exec,
			 * invalidate it.
			 */
			if ((ptelo & PTE_CHG) &&
			    (pmap_attr_fetch(pg) & PTE_EXEC)) {
				struct pvo_head *pvoh = vm_page_to_pvoh(pg);
				if (LIST_EMPTY(pvoh)) {
					DPRINTFN(EXEC, ("[pmap_pvo_remove: "
					    "%#" _PRIxpa ": clear-exec]\n",
					    VM_PAGE_TO_PHYS(pg)));
					pmap_attr_clear(pg, PTE_EXEC);
					PMAPCOUNT(exec_uncached_pvo_remove);
				} else {
					DPRINTFN(EXEC, ("[pmap_pvo_remove: "
					    "%#" _PRIxpa ": syncicache]\n",
					    VM_PAGE_TO_PHYS(pg)));
					pmap_syncicache(VM_PAGE_TO_PHYS(pg),
					    PAGE_SIZE);
					PMAPCOUNT(exec_synced_pvo_remove);
				}
			}

			pmap_attr_save(pg, ptelo & (PTE_REF|PTE_CHG));
		}
		PMAPCOUNT(unmappings);
	} else {
		PMAPCOUNT(kernel_unmappings);
	}

	/*
	 * Remove the PVO from its lists and return it to the pool.
	 */
	LIST_REMOVE(pvo, pvo_vlink);
	TAILQ_REMOVE(&pmap_pvo_table[ptegidx], pvo, pvo_olink);
	if (pvol) {
		LIST_INSERT_HEAD(pvol, pvo, pvo_vlink);
	}
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
	pmap_pvo_remove_depth--;
#endif
}

void
pmap_pvo_free(struct pvo_entry *pvo)
{

	pool_put(PVO_MANAGED_P(pvo) ? &pmap_mpvo_pool : &pmap_upvo_pool, pvo);
}

void
pmap_pvo_free_list(struct pvo_head *pvol)
{
	struct pvo_entry *pvo, *npvo;

	for (pvo = LIST_FIRST(pvol); pvo != NULL; pvo = npvo) {
		npvo = LIST_NEXT(pvo, pvo_vlink);
		LIST_REMOVE(pvo, pvo_vlink);
		pmap_pvo_free(pvo);
	}
}

/*
 * Mark a mapping as executable.
 * If this is the first executable mapping in the segment,
 * clear the noexec flag.
 */
static void
pvo_set_exec(struct pvo_entry *pvo)
{
	struct pmap *pm = pvo->pvo_pmap;

	if (pm == pmap_kernel() || PVO_EXECUTABLE_P(pvo)) {
		return;
	}
	pvo->pvo_vaddr |= PVO_EXECUTABLE;
#if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
	{
		int sr = PVO_VADDR(pvo) >> ADDR_SR_SHFT;
		if (pm->pm_exec[sr]++ == 0) {
			pm->pm_sr[sr] &= ~SR_NOEXEC;
		}
	}
#endif
}

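/*
 * Illustrative note: pm_exec[] counts executable mappings per 256MB
 * segment.  The first executable mapping in a segment clears SR_NOEXEC in
 * that segment register (pvo_set_exec above); when the count drops back to
 * zero, pvo_clear_exec below sets SR_NOEXEC again, so the whole segment
 * becomes no-execute.
 */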
1878 */ 1879 static void 1880 pvo_clear_exec(struct pvo_entry *pvo) 1881 { 1882 struct pmap *pm = pvo->pvo_pmap; 1883 1884 if (pm == pmap_kernel() || !PVO_EXECUTABLE_P(pvo)) { 1885 return; 1886 } 1887 pvo->pvo_vaddr &= ~PVO_EXECUTABLE; 1888 #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE) 1889 { 1890 int sr = PVO_VADDR(pvo) >> ADDR_SR_SHFT; 1891 if (--pm->pm_exec[sr] == 0) { 1892 pm->pm_sr[sr] |= SR_NOEXEC; 1893 } 1894 } 1895 #endif 1896 } 1897 1898 /* 1899 * Insert physical page at pa into the given pmap at virtual address va. 1900 */ 1901 int 1902 pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) 1903 { 1904 struct mem_region *mp; 1905 struct pvo_head *pvo_head; 1906 struct vm_page *pg; 1907 struct pool *pl; 1908 register_t pte_lo; 1909 int error; 1910 u_int pvo_flags; 1911 u_int was_exec = 0; 1912 1913 PMAP_LOCK(); 1914 1915 if (__predict_false(!pmap_initialized)) { 1916 pvo_head = &pmap_pvo_kunmanaged; 1917 pl = &pmap_upvo_pool; 1918 pvo_flags = 0; 1919 pg = NULL; 1920 was_exec = PTE_EXEC; 1921 } else { 1922 pvo_head = pa_to_pvoh(pa, &pg); 1923 pl = &pmap_mpvo_pool; 1924 pvo_flags = PVO_MANAGED; 1925 } 1926 1927 DPRINTFN(ENTER, 1928 ("pmap_enter(%p, %#" _PRIxva ", %#" _PRIxpa ", 0x%x, 0x%x):", 1929 pm, va, pa, prot, flags)); 1930 1931 /* 1932 * If this is a managed page, and it's the first reference to the 1933 * page clear the execness of the page. Otherwise fetch the execness. 1934 */ 1935 if (pg != NULL) 1936 was_exec = pmap_attr_fetch(pg) & PTE_EXEC; 1937 1938 DPRINTFN(ENTER, (" was_exec=%d", was_exec)); 1939 1940 /* 1941 * Assume the page is cache inhibited and access is guarded unless 1942 * it's in our available memory array. If it is in the memory array, 1943 * asssume it's in memory coherent memory. 1944 */ 1945 pte_lo = PTE_IG; 1946 if ((flags & PMAP_MD_NOCACHE) == 0) { 1947 for (mp = mem; mp->size; mp++) { 1948 if (pa >= mp->start && pa < mp->start + mp->size) { 1949 pte_lo = PTE_M; 1950 break; 1951 } 1952 } 1953 } 1954 1955 if (prot & VM_PROT_WRITE) 1956 pte_lo |= PTE_BW; 1957 else 1958 pte_lo |= PTE_BR; 1959 1960 /* 1961 * If this was in response to a fault, "pre-fault" the PTE's 1962 * changed/referenced bit appropriately. 1963 */ 1964 if (flags & VM_PROT_WRITE) 1965 pte_lo |= PTE_CHG; 1966 if (flags & VM_PROT_ALL) 1967 pte_lo |= PTE_REF; 1968 1969 /* 1970 * We need to know if this page can be executable 1971 */ 1972 flags |= (prot & VM_PROT_EXECUTE); 1973 1974 /* 1975 * Record mapping for later back-translation and pte spilling. 1976 * This will overwrite any existing mapping. 1977 */ 1978 error = pmap_pvo_enter(pm, pl, pvo_head, va, pa, pte_lo, flags); 1979 1980 /* 1981 * Flush the real page from the instruction cache if this page is 1982 * mapped executable and cacheable and has not been flushed since 1983 * the last time it was modified. 
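 * (That is, only when pmap_pvo_enter() succeeded, VM_PROT_EXECUTE was
 * requested, the mapping is not cache-inhibited, and the page was not
 * already marked PTE_EXEC.)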
1984 */ 1985 if (error == 0 && 1986 (flags & VM_PROT_EXECUTE) && 1987 (pte_lo & PTE_I) == 0 && 1988 was_exec == 0) { 1989 DPRINTFN(ENTER, (" syncicache")); 1990 PMAPCOUNT(exec_synced); 1991 pmap_syncicache(pa, PAGE_SIZE); 1992 if (pg != NULL) { 1993 pmap_attr_save(pg, PTE_EXEC); 1994 PMAPCOUNT(exec_cached); 1995 #if defined(DEBUG) || defined(PMAPDEBUG) 1996 if (pmapdebug & PMAPDEBUG_ENTER) 1997 printf(" marked-as-exec"); 1998 else if (pmapdebug & PMAPDEBUG_EXEC) 1999 printf("[pmap_enter: %#" _PRIxpa ": marked-as-exec]\n", 2000 VM_PAGE_TO_PHYS(pg)); 2001 2002 #endif 2003 } 2004 } 2005 2006 DPRINTFN(ENTER, (": error=%d\n", error)); 2007 2008 PMAP_UNLOCK(); 2009 2010 return error; 2011 } 2012 2013 void 2014 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) 2015 { 2016 struct mem_region *mp; 2017 register_t pte_lo; 2018 int error; 2019 2020 #if defined (PMAP_OEA64_BRIDGE) 2021 if (va < VM_MIN_KERNEL_ADDRESS) 2022 panic("pmap_kenter_pa: attempt to enter " 2023 "non-kernel address %#" _PRIxva "!", va); 2024 #endif 2025 2026 DPRINTFN(KENTER, 2027 ("pmap_kenter_pa(%#" _PRIxva ",%#" _PRIxpa ",%#x)\n", va, pa, prot)); 2028 2029 PMAP_LOCK(); 2030 2031 /* 2032 * Assume the page is cache inhibited and access is guarded unless 2033 * it's in our available memory array. If it is in the memory array, 2034 * assume it's in memory coherent memory. 2035 */ 2036 pte_lo = PTE_IG; 2037 if ((flags & PMAP_MD_NOCACHE) == 0) { 2038 for (mp = mem; mp->size; mp++) { 2039 if (pa >= mp->start && pa < mp->start + mp->size) { 2040 pte_lo = PTE_M; 2041 break; 2042 } 2043 } 2044 } 2045 2046 if (prot & VM_PROT_WRITE) 2047 pte_lo |= PTE_BW; 2048 else 2049 pte_lo |= PTE_BR; 2050 2051 /* 2052 * We don't care about REF/CHG on PVOs on the unmanaged list. 2053 */ 2054 error = pmap_pvo_enter(pmap_kernel(), &pmap_upvo_pool, 2055 &pmap_pvo_kunmanaged, va, pa, pte_lo, prot|PMAP_WIRED); 2056 2057 if (error != 0) 2058 panic("pmap_kenter_pa: failed to enter va %#" _PRIxva " pa %#" _PRIxpa ": %d", 2059 va, pa, error); 2060 2061 PMAP_UNLOCK(); 2062 } 2063 2064 void 2065 pmap_kremove(vaddr_t va, vsize_t len) 2066 { 2067 if (va < VM_MIN_KERNEL_ADDRESS) 2068 panic("pmap_kremove: attempt to remove " 2069 "non-kernel address %#" _PRIxva "!", va); 2070 2071 DPRINTFN(KREMOVE,("pmap_kremove(%#" _PRIxva ",%#" _PRIxva ")\n", va, len)); 2072 pmap_remove(pmap_kernel(), va, va + len); 2073 } 2074 2075 /* 2076 * Remove the given range of mapping entries. 2077 */ 2078 void 2079 pmap_remove(pmap_t pm, vaddr_t va, vaddr_t endva) 2080 { 2081 struct pvo_head pvol; 2082 struct pvo_entry *pvo; 2083 register_t msr; 2084 int pteidx; 2085 2086 PMAP_LOCK(); 2087 LIST_INIT(&pvol); 2088 msr = pmap_interrupts_off(); 2089 for (; va < endva; va += PAGE_SIZE) { 2090 pvo = pmap_pvo_find_va(pm, va, &pteidx); 2091 if (pvo != NULL) { 2092 pmap_pvo_remove(pvo, pteidx, &pvol); 2093 } 2094 } 2095 pmap_interrupts_restore(msr); 2096 pmap_pvo_free_list(&pvol); 2097 PMAP_UNLOCK(); 2098 } 2099 2100 /* 2101 * Get the physical page address for the given pmap/virtual address. 2102 */ 2103 bool 2104 pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap) 2105 { 2106 struct pvo_entry *pvo; 2107 register_t msr; 2108 2109 PMAP_LOCK(); 2110 2111 /* 2112 * If this is a kernel pmap lookup, also check the battable 2113 * and if we get a hit, translate the VA to a PA using the 2114 * BAT entries. Don't check for VM_MAX_KERNEL_ADDRESS as 2115 * that will wrap back to 0.
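 * On a BAT hit the physical address is formed from the BAT lower word
 * and a block-size mask derived from its BL field, i.e.
 *	*pap = (batl & mask) | (va & ~mask);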
2116 */ 2117 if (pm == pmap_kernel() && 2118 (va < VM_MIN_KERNEL_ADDRESS || 2119 (KERNEL2_SR < 15 && VM_MAX_KERNEL_ADDRESS <= va))) { 2120 KASSERT((va >> ADDR_SR_SHFT) != USER_SR); 2121 #if defined (PMAP_OEA) 2122 #ifdef PPC_OEA601 2123 if ((MFPVR() >> 16) == MPC601) { 2124 register_t batu = battable[va >> 23].batu; 2125 register_t batl = battable[va >> 23].batl; 2126 register_t sr = iosrtable[va >> ADDR_SR_SHFT]; 2127 if (BAT601_VALID_P(batl) && 2128 BAT601_VA_MATCH_P(batu, batl, va)) { 2129 register_t mask = 2130 (~(batl & BAT601_BSM) << 17) & ~0x1ffffL; 2131 if (pap) 2132 *pap = (batl & mask) | (va & ~mask); 2133 PMAP_UNLOCK(); 2134 return true; 2135 } else if (SR601_VALID_P(sr) && 2136 SR601_PA_MATCH_P(sr, va)) { 2137 if (pap) 2138 *pap = va; 2139 PMAP_UNLOCK(); 2140 return true; 2141 } 2142 } else 2143 #endif /* PPC_OEA601 */ 2144 { 2145 register_t batu = battable[va >> ADDR_SR_SHFT].batu; 2146 if (BAT_VALID_P(batu,0) && BAT_VA_MATCH_P(batu,va)) { 2147 register_t batl = 2148 battable[va >> ADDR_SR_SHFT].batl; 2149 register_t mask = 2150 (~(batu & BAT_BL) << 15) & ~0x1ffffL; 2151 if (pap) 2152 *pap = (batl & mask) | (va & ~mask); 2153 PMAP_UNLOCK(); 2154 return true; 2155 } 2156 } 2157 return false; 2158 #elif defined (PMAP_OEA64_BRIDGE) 2159 if (va >= SEGMENT_LENGTH) 2160 panic("%s: pm: %s va >= SEGMENT_LENGTH, va: 0x%08lx\n", 2161 __func__, (pm == pmap_kernel() ? "kernel" : "user"), va); 2162 else { 2163 if (pap) 2164 *pap = va; 2165 PMAP_UNLOCK(); 2166 return true; 2167 } 2168 #elif defined (PMAP_OEA64) 2169 #error PPC_OEA64 not supported 2170 #endif /* PPC_OEA */ 2171 } 2172 2173 msr = pmap_interrupts_off(); 2174 pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL); 2175 if (pvo != NULL) { 2176 PMAP_PVO_CHECK(pvo); /* sanity check */ 2177 if (pap) 2178 *pap = (pvo->pvo_pte.pte_lo & PTE_RPGN) 2179 | (va & ADDR_POFF); 2180 } 2181 pmap_interrupts_restore(msr); 2182 PMAP_UNLOCK(); 2183 return pvo != NULL; 2184 } 2185 2186 /* 2187 * Lower the protection on the specified range of this pmap. 2188 */ 2189 void 2190 pmap_protect(pmap_t pm, vaddr_t va, vaddr_t endva, vm_prot_t prot) 2191 { 2192 struct pvo_entry *pvo; 2193 volatile struct pte *pt; 2194 register_t msr; 2195 int pteidx; 2196 2197 /* 2198 * Since this routine only downgrades protection, we should 2199 * always be called with at least one bit not set. 2200 */ 2201 KASSERT(prot != VM_PROT_ALL); 2202 2203 /* 2204 * If there is no protection, this is equivalent to 2205 * removing the range from the pmap. 2206 */ 2207 if ((prot & VM_PROT_READ) == 0) { 2208 pmap_remove(pm, va, endva); 2209 return; 2210 } 2211 2212 PMAP_LOCK(); 2213 2214 msr = pmap_interrupts_off(); 2215 for (; va < endva; va += PAGE_SIZE) { 2216 pvo = pmap_pvo_find_va(pm, va, &pteidx); 2217 if (pvo == NULL) 2218 continue; 2219 PMAP_PVO_CHECK(pvo); /* sanity check */ 2220 2221 /* 2222 * Revoke executable if asked to do so. 2223 */ 2224 if ((prot & VM_PROT_EXECUTE) == 0) 2225 pvo_clear_exec(pvo); 2226 2227 #if 0 2228 /* 2229 * If the page is already read-only, no change 2230 * needs to be made. 2231 */ 2232 if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) 2233 continue; 2234 #endif 2235 /* 2236 * Grab the PTE pointer before we diddle with 2237 * the cached PTE copy. 2238 */ 2239 pt = pmap_pvo_to_pte(pvo, pteidx); 2240 /* 2241 * Change the protection of the page. 2242 */ 2243 pvo->pvo_pte.pte_lo &= ~PTE_PP; 2244 pvo->pvo_pte.pte_lo |= PTE_BR; 2245 2246 /* 2247 * If the PVO is in the page table, update 2248 * that pte as well.
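 * The cached copy in pvo_pte has already been downgraded to PTE_BR
 * above; pmap_pte_change() propagates that change to the PTE in the
 * page table.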
2249 */ 2250 if (pt != NULL) { 2251 pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 2252 PVO_WHERE(pvo, PMAP_PROTECT); 2253 PMAPCOUNT(ptes_changed); 2254 } 2255 2256 PMAP_PVO_CHECK(pvo); /* sanity check */ 2257 } 2258 pmap_interrupts_restore(msr); 2259 PMAP_UNLOCK(); 2260 } 2261 2262 void 2263 pmap_unwire(pmap_t pm, vaddr_t va) 2264 { 2265 struct pvo_entry *pvo; 2266 register_t msr; 2267 2268 PMAP_LOCK(); 2269 msr = pmap_interrupts_off(); 2270 pvo = pmap_pvo_find_va(pm, va, NULL); 2271 if (pvo != NULL) { 2272 if (PVO_WIRED_P(pvo)) { 2273 pvo->pvo_vaddr &= ~PVO_WIRED; 2274 pm->pm_stats.wired_count--; 2275 } 2276 PMAP_PVO_CHECK(pvo); /* sanity check */ 2277 } 2278 pmap_interrupts_restore(msr); 2279 PMAP_UNLOCK(); 2280 } 2281 2282 /* 2283 * Lower the protection on the specified physical page. 2284 */ 2285 void 2286 pmap_page_protect(struct vm_page *pg, vm_prot_t prot) 2287 { 2288 struct pvo_head *pvo_head, pvol; 2289 struct pvo_entry *pvo, *next_pvo; 2290 volatile struct pte *pt; 2291 register_t msr; 2292 2293 PMAP_LOCK(); 2294 2295 KASSERT(prot != VM_PROT_ALL); 2296 LIST_INIT(&pvol); 2297 msr = pmap_interrupts_off(); 2298 2299 /* 2300 * When UVM reuses a page, it does a pmap_page_protect with 2301 * VM_PROT_NONE. At that point, we can clear the exec flag 2302 * since we know the page will have different contents. 2303 */ 2304 if ((prot & VM_PROT_READ) == 0) { 2305 DPRINTFN(EXEC, ("[pmap_page_protect: %#" _PRIxpa ": clear-exec]\n", 2306 VM_PAGE_TO_PHYS(pg))); 2307 if (pmap_attr_fetch(pg) & PTE_EXEC) { 2308 PMAPCOUNT(exec_uncached_page_protect); 2309 pmap_attr_clear(pg, PTE_EXEC); 2310 } 2311 } 2312 2313 pvo_head = vm_page_to_pvoh(pg); 2314 for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) { 2315 next_pvo = LIST_NEXT(pvo, pvo_vlink); 2316 PMAP_PVO_CHECK(pvo); /* sanity check */ 2317 2318 /* 2319 * Downgrading to no mapping at all, we just remove the entry. 2320 */ 2321 if ((prot & VM_PROT_READ) == 0) { 2322 pmap_pvo_remove(pvo, -1, &pvol); 2323 continue; 2324 } 2325 2326 /* 2327 * If EXEC permission is being revoked, just clear the 2328 * flag in the PVO. 2329 */ 2330 if ((prot & VM_PROT_EXECUTE) == 0) 2331 pvo_clear_exec(pvo); 2332 2333 /* 2334 * If this entry is already RO, don't diddle with the 2335 * page table. 2336 */ 2337 if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) { 2338 PMAP_PVO_CHECK(pvo); 2339 continue; 2340 } 2341 2342 /* 2343 * Grab the PTE before we diddle the bits so 2344 * pvo_to_pte can verify the pte contents are as 2345 * expected. 2346 */ 2347 pt = pmap_pvo_to_pte(pvo, -1); 2348 pvo->pvo_pte.pte_lo &= ~PTE_PP; 2349 pvo->pvo_pte.pte_lo |= PTE_BR; 2350 if (pt != NULL) { 2351 pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 2352 PVO_WHERE(pvo, PMAP_PAGE_PROTECT); 2353 PMAPCOUNT(ptes_changed); 2354 } 2355 PMAP_PVO_CHECK(pvo); /* sanity check */ 2356 } 2357 pmap_interrupts_restore(msr); 2358 pmap_pvo_free_list(&pvol); 2359 2360 PMAP_UNLOCK(); 2361 } 2362 2363 /* 2364 * Activate the address space for the specified process. If the process 2365 * is the current process, load the new MMU context. 2366 */ 2367 void 2368 pmap_activate(struct lwp *l) 2369 { 2370 struct pcb *pcb = lwp_getpcb(l); 2371 pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap; 2372 2373 DPRINTFN(ACTIVATE, 2374 ("pmap_activate: lwp %p (curlwp %p)\n", l, curlwp)); 2375 2376 /* 2377 * XXX Normally performed in cpu_lwp_fork(). 2378 */ 2379 pcb->pcb_pm = pmap; 2380 2381 /* 2382 * In theory, the SR registers need only be valid on return 2383 * to user space, so we wait to do them there.
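 * Only curpm is updated here; the segment registers themselves are
 * expected to be reloaded from it on the way back to user mode.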
2384 */ 2385 if (l == curlwp) { 2386 /* Store pointer to new current pmap. */ 2387 curpm = pmap; 2388 } 2389 } 2390 2391 /* 2392 * Deactivate the specified process's address space. 2393 */ 2394 void 2395 pmap_deactivate(struct lwp *l) 2396 { 2397 } 2398 2399 bool 2400 pmap_query_bit(struct vm_page *pg, int ptebit) 2401 { 2402 struct pvo_entry *pvo; 2403 volatile struct pte *pt; 2404 register_t msr; 2405 2406 PMAP_LOCK(); 2407 2408 if (pmap_attr_fetch(pg) & ptebit) { 2409 PMAP_UNLOCK(); 2410 return true; 2411 } 2412 2413 msr = pmap_interrupts_off(); 2414 LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) { 2415 PMAP_PVO_CHECK(pvo); /* sanity check */ 2416 /* 2417 * See if we saved the bit off. If so, cache it and return 2418 * success. 2419 */ 2420 if (pvo->pvo_pte.pte_lo & ptebit) { 2421 pmap_attr_save(pg, ptebit); 2422 PMAP_PVO_CHECK(pvo); /* sanity check */ 2423 pmap_interrupts_restore(msr); 2424 PMAP_UNLOCK(); 2425 return true; 2426 } 2427 } 2428 /* 2429 * No luck, now go thru the hard part of looking at the ptes 2430 * themselves. Sync so any pending REF/CHG bits are flushed 2431 * to the PTEs. 2432 */ 2433 SYNC(); 2434 LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) { 2435 PMAP_PVO_CHECK(pvo); /* sanity check */ 2436 /* 2437 * See if this pvo has a valid PTE. If so, fetch the 2438 * REF/CHG bits from the valid PTE. If the appropriate 2439 * ptebit is set, cache it and return success. 2440 */ 2441 pt = pmap_pvo_to_pte(pvo, -1); 2442 if (pt != NULL) { 2443 pmap_pte_synch(pt, &pvo->pvo_pte); 2444 if (pvo->pvo_pte.pte_lo & ptebit) { 2445 pmap_attr_save(pg, ptebit); 2446 PMAP_PVO_CHECK(pvo); /* sanity check */ 2447 pmap_interrupts_restore(msr); 2448 PMAP_UNLOCK(); 2449 return true; 2450 } 2451 } 2452 } 2453 pmap_interrupts_restore(msr); 2454 PMAP_UNLOCK(); 2455 return false; 2456 } 2457 2458 bool 2459 pmap_clear_bit(struct vm_page *pg, int ptebit) 2460 { 2461 struct pvo_head *pvoh = vm_page_to_pvoh(pg); 2462 struct pvo_entry *pvo; 2463 volatile struct pte *pt; 2464 register_t msr; 2465 int rv = 0; 2466 2467 PMAP_LOCK(); 2468 msr = pmap_interrupts_off(); 2469 2470 /* 2471 * Fetch the cached value. 2472 */ 2473 rv |= pmap_attr_fetch(pg); 2474 2475 /* 2476 * Clear the cached value. 2477 */ 2478 pmap_attr_clear(pg, ptebit); 2479 2480 /* 2481 * Sync so any pending REF/CHG bits are flushed to the PTEs (so we 2482 * can reset the right ones). Note that since the pvo entries and 2483 * list heads are accessed via BAT0 and are never placed in the 2484 * page table, we don't have to worry about further accesses setting 2485 * the REF/CHG bits. 2486 */ 2487 SYNC(); 2488 2489 /* 2490 * For each pvo entry, clear the pvo's ptebit. If this pvo has a 2491 * valid PTE, also clear the ptebit from that PTE. 2492 */ 2493 LIST_FOREACH(pvo, pvoh, pvo_vlink) { 2494 PMAP_PVO_CHECK(pvo); /* sanity check */ 2495 pt = pmap_pvo_to_pte(pvo, -1); 2496 if (pt != NULL) { 2497 /* 2498 * Only sync the PTE if the bit we are looking 2499 * for is not already set. 2500 */ 2501 if ((pvo->pvo_pte.pte_lo & ptebit) == 0) 2502 pmap_pte_synch(pt, &pvo->pvo_pte); 2503 /* 2504 * If the bit we are looking for was already set, 2505 * clear that bit in the pte.
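 * Whether or not a hardware PTE was found, the REF/CHG bits of the
 * cached copy are accumulated into rv and the requested bit is then
 * cleared from it.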
2506 */ 2507 if (pvo->pvo_pte.pte_lo & ptebit) 2508 pmap_pte_clear(pt, PVO_VADDR(pvo), ptebit); 2509 } 2510 rv |= pvo->pvo_pte.pte_lo & (PTE_CHG|PTE_REF); 2511 pvo->pvo_pte.pte_lo &= ~ptebit; 2512 PMAP_PVO_CHECK(pvo); /* sanity check */ 2513 } 2514 pmap_interrupts_restore(msr); 2515 2516 /* 2517 * If we are clearing the modify bit and this page was marked EXEC 2518 * and the user of the page thinks the page was modified, then we 2519 * need to clean it from the icache if it's mapped or clear the EXEC 2520 * bit if it's not mapped. The page itself might not have the CHG 2521 * bit set if the modification was done via DMA to the page. 2522 */ 2523 if ((ptebit & PTE_CHG) && (rv & PTE_EXEC)) { 2524 if (LIST_EMPTY(pvoh)) { 2525 DPRINTFN(EXEC, ("[pmap_clear_bit: %#" _PRIxpa ": clear-exec]\n", 2526 VM_PAGE_TO_PHYS(pg))); 2527 pmap_attr_clear(pg, PTE_EXEC); 2528 PMAPCOUNT(exec_uncached_clear_modify); 2529 } else { 2530 DPRINTFN(EXEC, ("[pmap_clear_bit: %#" _PRIxpa ": syncicache]\n", 2531 VM_PAGE_TO_PHYS(pg))); 2532 pmap_syncicache(VM_PAGE_TO_PHYS(pg), PAGE_SIZE); 2533 PMAPCOUNT(exec_synced_clear_modify); 2534 } 2535 } 2536 PMAP_UNLOCK(); 2537 return (rv & ptebit) != 0; 2538 } 2539 2540 void 2541 pmap_procwr(struct proc *p, vaddr_t va, size_t len) 2542 { 2543 struct pvo_entry *pvo; 2544 size_t offset = va & ADDR_POFF; 2545 int s; 2546 2547 PMAP_LOCK(); 2548 s = splvm(); 2549 while (len > 0) { 2550 size_t seglen = PAGE_SIZE - offset; 2551 if (seglen > len) 2552 seglen = len; 2553 pvo = pmap_pvo_find_va(p->p_vmspace->vm_map.pmap, va, NULL); 2554 if (pvo != NULL && PVO_EXECUTABLE_P(pvo)) { 2555 pmap_syncicache( 2556 (pvo->pvo_pte.pte_lo & PTE_RPGN) | offset, seglen); 2557 PMAP_PVO_CHECK(pvo); 2558 } 2559 va += seglen; 2560 len -= seglen; 2561 offset = 0; 2562 } 2563 splx(s); 2564 PMAP_UNLOCK(); 2565 } 2566 2567 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB) 2568 void 2569 pmap_pte_print(volatile struct pte *pt) 2570 { 2571 printf("PTE %p: ", pt); 2572 2573 #if defined(PMAP_OEA) 2574 /* High word: */ 2575 printf("%#" _PRIxpte ": [", pt->pte_hi); 2576 #else 2577 printf("%#" _PRIxpte ": [", pt->pte_hi); 2578 #endif /* PMAP_OEA */ 2579 2580 printf("%c ", (pt->pte_hi & PTE_VALID) ? 'v' : 'i'); 2581 printf("%c ", (pt->pte_hi & PTE_HID) ? 'h' : '-'); 2582 2583 printf("%#" _PRIxpte " %#" _PRIxpte "", 2584 (pt->pte_hi &~ PTE_VALID)>>PTE_VSID_SHFT, 2585 pt->pte_hi & PTE_API); 2586 #if defined(PMAP_OEA) || defined(PMAP_OEA64_BRIDGE) 2587 printf(" (va %#" _PRIxva ")] ", pmap_pte_to_va(pt)); 2588 #else 2589 printf(" (va %#" _PRIxva ")] ", pmap_pte_to_va(pt)); 2590 #endif /* PMAP_OEA */ 2591 2592 /* Low word: */ 2593 #if defined (PMAP_OEA) 2594 printf(" %#" _PRIxpte ": [", pt->pte_lo); 2595 printf("%#" _PRIxpte "... ", pt->pte_lo >> 12); 2596 #else 2597 printf(" %#" _PRIxpte ": [", pt->pte_lo); 2598 printf("%#" _PRIxpte "... ", pt->pte_lo >> 12); 2599 #endif 2600 printf("%c ", (pt->pte_lo & PTE_REF) ? 'r' : 'u'); 2601 printf("%c ", (pt->pte_lo & PTE_CHG) ? 'c' : 'n'); 2602 printf("%c", (pt->pte_lo & PTE_W) ? 'w' : '.'); 2603 printf("%c", (pt->pte_lo & PTE_I) ? 'i' : '.'); 2604 printf("%c", (pt->pte_lo & PTE_M) ? 'm' : '.'); 2605 printf("%c ", (pt->pte_lo & PTE_G) ? 
'g' : '.'); 2606 switch (pt->pte_lo & PTE_PP) { 2607 case PTE_BR: printf("br]\n"); break; 2608 case PTE_BW: printf("bw]\n"); break; 2609 case PTE_SO: printf("so]\n"); break; 2610 case PTE_SW: printf("sw]\n"); break; 2611 } 2612 } 2613 #endif 2614 2615 #if defined(DDB) 2616 void 2617 pmap_pteg_check(void) 2618 { 2619 volatile struct pte *pt; 2620 int i; 2621 int ptegidx; 2622 u_int p_valid = 0; 2623 u_int s_valid = 0; 2624 u_int invalid = 0; 2625 2626 for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) { 2627 for (pt = pmap_pteg_table[ptegidx].pt, i = 8; --i >= 0; pt++) { 2628 if (pt->pte_hi & PTE_VALID) { 2629 if (pt->pte_hi & PTE_HID) 2630 s_valid++; 2631 else 2632 { 2633 p_valid++; 2634 } 2635 } else 2636 invalid++; 2637 } 2638 } 2639 printf("pteg_check: v(p) %#x (%d), v(s) %#x (%d), i %#x (%d)\n", 2640 p_valid, p_valid, s_valid, s_valid, 2641 invalid, invalid); 2642 } 2643 2644 void 2645 pmap_print_mmuregs(void) 2646 { 2647 int i; 2648 u_int cpuvers; 2649 #ifndef PMAP_OEA64 2650 vaddr_t addr; 2651 register_t soft_sr[16]; 2652 #endif 2653 #if defined (PMAP_OEA) || defined (PMAP_OEA_BRIDGE) 2654 struct bat soft_ibat[4]; 2655 struct bat soft_dbat[4]; 2656 #endif 2657 paddr_t sdr1; 2658 2659 cpuvers = MFPVR() >> 16; 2660 __asm volatile ("mfsdr1 %0" : "=r"(sdr1)); 2661 #ifndef PMAP_OEA64 2662 addr = 0; 2663 for (i = 0; i < 16; i++) { 2664 soft_sr[i] = MFSRIN(addr); 2665 addr += (1 << ADDR_SR_SHFT); 2666 } 2667 #endif 2668 2669 #if defined (PMAP_OEA) || defined (PMAP_OEA_BRIDGE) 2670 /* read iBAT (601: uBAT) registers */ 2671 __asm volatile ("mfibatu %0,0" : "=r"(soft_ibat[0].batu)); 2672 __asm volatile ("mfibatl %0,0" : "=r"(soft_ibat[0].batl)); 2673 __asm volatile ("mfibatu %0,1" : "=r"(soft_ibat[1].batu)); 2674 __asm volatile ("mfibatl %0,1" : "=r"(soft_ibat[1].batl)); 2675 __asm volatile ("mfibatu %0,2" : "=r"(soft_ibat[2].batu)); 2676 __asm volatile ("mfibatl %0,2" : "=r"(soft_ibat[2].batl)); 2677 __asm volatile ("mfibatu %0,3" : "=r"(soft_ibat[3].batu)); 2678 __asm volatile ("mfibatl %0,3" : "=r"(soft_ibat[3].batl)); 2679 2680 2681 if (cpuvers != MPC601) { 2682 /* read dBAT registers */ 2683 __asm volatile ("mfdbatu %0,0" : "=r"(soft_dbat[0].batu)); 2684 __asm volatile ("mfdbatl %0,0" : "=r"(soft_dbat[0].batl)); 2685 __asm volatile ("mfdbatu %0,1" : "=r"(soft_dbat[1].batu)); 2686 __asm volatile ("mfdbatl %0,1" : "=r"(soft_dbat[1].batl)); 2687 __asm volatile ("mfdbatu %0,2" : "=r"(soft_dbat[2].batu)); 2688 __asm volatile ("mfdbatl %0,2" : "=r"(soft_dbat[2].batl)); 2689 __asm volatile ("mfdbatu %0,3" : "=r"(soft_dbat[3].batu)); 2690 __asm volatile ("mfdbatl %0,3" : "=r"(soft_dbat[3].batl)); 2691 } 2692 #endif 2693 2694 printf("SDR1:\t%#" _PRIxpa "\n", sdr1); 2695 #ifndef PMAP_OEA64 2696 printf("SR[]:\t"); 2697 for (i = 0; i < 4; i++) 2698 printf("0x%08lx, ", soft_sr[i]); 2699 printf("\n\t"); 2700 for ( ; i < 8; i++) 2701 printf("0x%08lx, ", soft_sr[i]); 2702 printf("\n\t"); 2703 for ( ; i < 12; i++) 2704 printf("0x%08lx, ", soft_sr[i]); 2705 printf("\n\t"); 2706 for ( ; i < 16; i++) 2707 printf("0x%08lx, ", soft_sr[i]); 2708 printf("\n"); 2709 #endif 2710 2711 #if defined(PMAP_OEA) || defined(PMAP_OEA_BRIDGE) 2712 printf("%cBAT[]:\t", cpuvers == MPC601 ? 
'u' : 'i'); 2713 for (i = 0; i < 4; i++) { 2714 printf("0x%08lx 0x%08lx, ", 2715 soft_ibat[i].batu, soft_ibat[i].batl); 2716 if (i == 1) 2717 printf("\n\t"); 2718 } 2719 if (cpuvers != MPC601) { 2720 printf("\ndBAT[]:\t"); 2721 for (i = 0; i < 4; i++) { 2722 printf("0x%08lx 0x%08lx, ", 2723 soft_dbat[i].batu, soft_dbat[i].batl); 2724 if (i == 1) 2725 printf("\n\t"); 2726 } 2727 } 2728 printf("\n"); 2729 #endif /* PMAP_OEA... */ 2730 } 2731 2732 void 2733 pmap_print_pte(pmap_t pm, vaddr_t va) 2734 { 2735 struct pvo_entry *pvo; 2736 volatile struct pte *pt; 2737 int pteidx; 2738 2739 pvo = pmap_pvo_find_va(pm, va, &pteidx); 2740 if (pvo != NULL) { 2741 pt = pmap_pvo_to_pte(pvo, pteidx); 2742 if (pt != NULL) { 2743 printf("VA %#" _PRIxva " -> %p -> %s %#" _PRIxpte ", %#" _PRIxpte "\n", 2744 va, pt, 2745 pt->pte_hi & PTE_HID ? "(sec)" : "(pri)", 2746 pt->pte_hi, pt->pte_lo); 2747 } else { 2748 printf("No valid PTE found\n"); 2749 } 2750 } else { 2751 printf("Address not in pmap\n"); 2752 } 2753 } 2754 2755 void 2756 pmap_pteg_dist(void) 2757 { 2758 struct pvo_entry *pvo; 2759 int ptegidx; 2760 int depth; 2761 int max_depth = 0; 2762 unsigned int depths[64]; 2763 2764 memset(depths, 0, sizeof(depths)); 2765 for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) { 2766 depth = 0; 2767 TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { 2768 depth++; 2769 } 2770 if (depth > max_depth) 2771 max_depth = depth; 2772 if (depth > 63) 2773 depth = 63; 2774 depths[depth]++; 2775 } 2776 2777 for (depth = 0; depth < 64; depth++) { 2778 printf(" [%2d]: %8u", depth, depths[depth]); 2779 if ((depth & 3) == 3) 2780 printf("\n"); 2781 if (depth == max_depth) 2782 break; 2783 } 2784 if ((depth & 3) != 3) 2785 printf("\n"); 2786 printf("Max depth found was %d\n", max_depth); 2787 } 2788 #endif /* DEBUG */ 2789 2790 #if defined(PMAPCHECK) || defined(DEBUG) 2791 void 2792 pmap_pvo_verify(void) 2793 { 2794 int ptegidx; 2795 int s; 2796 2797 s = splvm(); 2798 for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) { 2799 struct pvo_entry *pvo; 2800 TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { 2801 if ((uintptr_t) pvo >= SEGMENT_LENGTH) 2802 panic("pmap_pvo_verify: invalid pvo %p " 2803 "on list %#x", pvo, ptegidx); 2804 pmap_pvo_check(pvo); 2805 } 2806 } 2807 splx(s); 2808 } 2809 #endif /* PMAPCHECK */ 2810 2811 2812 void * 2813 pmap_pool_ualloc(struct pool *pp, int flags) 2814 { 2815 struct pvo_page *pvop; 2816 2817 if (uvm.page_init_done != true) { 2818 return (void *) uvm_pageboot_alloc(PAGE_SIZE); 2819 } 2820 2821 PMAP_LOCK(); 2822 pvop = SIMPLEQ_FIRST(&pmap_upvop_head); 2823 if (pvop != NULL) { 2824 pmap_upvop_free--; 2825 SIMPLEQ_REMOVE_HEAD(&pmap_upvop_head, pvop_link); 2826 PMAP_UNLOCK(); 2827 return pvop; 2828 } 2829 PMAP_UNLOCK(); 2830 return pmap_pool_malloc(pp, flags); 2831 } 2832 2833 void * 2834 pmap_pool_malloc(struct pool *pp, int flags) 2835 { 2836 struct pvo_page *pvop; 2837 struct vm_page *pg; 2838 2839 PMAP_LOCK(); 2840 pvop = SIMPLEQ_FIRST(&pmap_mpvop_head); 2841 if (pvop != NULL) { 2842 pmap_mpvop_free--; 2843 SIMPLEQ_REMOVE_HEAD(&pmap_mpvop_head, pvop_link); 2844 PMAP_UNLOCK(); 2845 return pvop; 2846 } 2847 PMAP_UNLOCK(); 2848 again: 2849 pg = uvm_pagealloc_strat(NULL, 0, NULL, UVM_PGA_USERESERVE, 2850 UVM_PGA_STRAT_ONLY, VM_FREELIST_FIRST256); 2851 if (__predict_false(pg == NULL)) { 2852 if (flags & PR_WAITOK) { 2853 uvm_wait("plpg"); 2854 goto again; 2855 } else { 2856 return (0); 2857 } 2858 } 2859 KDASSERT(VM_PAGE_TO_PHYS(pg) == (uintptr_t)VM_PAGE_TO_PHYS(pg)); 2860 return 
(void *)(uintptr_t) VM_PAGE_TO_PHYS(pg); 2861 } 2862 2863 void 2864 pmap_pool_ufree(struct pool *pp, void *va) 2865 { 2866 struct pvo_page *pvop; 2867 #if 0 2868 if (PHYS_TO_VM_PAGE((paddr_t) va) != NULL) { 2869 pmap_pool_mfree(va, size, tag); 2870 return; 2871 } 2872 #endif 2873 PMAP_LOCK(); 2874 pvop = va; 2875 SIMPLEQ_INSERT_HEAD(&pmap_upvop_head, pvop, pvop_link); 2876 pmap_upvop_free++; 2877 if (pmap_upvop_free > pmap_upvop_maxfree) 2878 pmap_upvop_maxfree = pmap_upvop_free; 2879 PMAP_UNLOCK(); 2880 } 2881 2882 void 2883 pmap_pool_mfree(struct pool *pp, void *va) 2884 { 2885 struct pvo_page *pvop; 2886 2887 PMAP_LOCK(); 2888 pvop = va; 2889 SIMPLEQ_INSERT_HEAD(&pmap_mpvop_head, pvop, pvop_link); 2890 pmap_mpvop_free++; 2891 if (pmap_mpvop_free > pmap_mpvop_maxfree) 2892 pmap_mpvop_maxfree = pmap_mpvop_free; 2893 PMAP_UNLOCK(); 2894 #if 0 2895 uvm_pagefree(PHYS_TO_VM_PAGE((paddr_t) va)); 2896 #endif 2897 } 2898 2899 /* 2900 * This routine is used during bootstrapping to steal to-be-managed memory (which will 2901 * then be unmanaged). We use it to grab from the first 256MB for our 2902 * pmap needs and above 256MB for other stuff. 2903 */ 2904 vaddr_t 2905 pmap_steal_memory(vsize_t vsize, vaddr_t *vstartp, vaddr_t *vendp) 2906 { 2907 vsize_t size; 2908 vaddr_t va; 2909 paddr_t pa = 0; 2910 int npgs, bank; 2911 struct vm_physseg *ps; 2912 2913 if (uvm.page_init_done == true) 2914 panic("pmap_steal_memory: called _after_ bootstrap"); 2915 2916 *vstartp = VM_MIN_KERNEL_ADDRESS; 2917 *vendp = VM_MAX_KERNEL_ADDRESS; 2918 2919 size = round_page(vsize); 2920 npgs = atop(size); 2921 2922 /* 2923 * PA 0 will never be among those given to UVM so we can use it 2924 * to indicate we couldn't steal any memory. 2925 */ 2926 for (bank = 0; bank < vm_nphysseg; bank++) { 2927 ps = VM_PHYSMEM_PTR(bank); 2928 if (ps->free_list == VM_FREELIST_FIRST256 && 2929 ps->avail_end - ps->avail_start >= npgs) { 2930 pa = ptoa(ps->avail_start); 2931 break; 2932 } 2933 } 2934 2935 if (pa == 0) 2936 panic("pmap_steal_memory: no appropriate memory to steal!"); 2937 2938 ps->avail_start += npgs; 2939 ps->start += npgs; 2940 2941 /* 2942 * If we've used up all the pages in the segment, remove it and 2943 * compact the list. 2944 */ 2945 if (ps->avail_start == ps->end) { 2946 /* 2947 * If this was the last one, then a very bad thing has occurred 2948 */ 2949 if (--vm_nphysseg == 0) 2950 panic("pmap_steal_memory: out of memory!"); 2951 2952 printf("pmap_steal_memory: consumed bank %d\n", bank); 2953 for (; bank < vm_nphysseg; bank++, ps++) { 2954 ps[0] = ps[1]; 2955 } 2956 } 2957 2958 va = (vaddr_t) pa; 2959 memset((void *) va, 0, size); 2960 pmap_pages_stolen += npgs; 2961 #ifdef DEBUG 2962 if (pmapdebug && npgs > 1) { 2963 u_int cnt = 0; 2964 for (bank = 0; bank < vm_nphysseg; bank++) { 2965 ps = VM_PHYSMEM_PTR(bank); 2966 cnt += ps->avail_end - ps->avail_start; 2967 } 2968 printf("pmap_steal_memory: stole %u (total %u) pages (%u left)\n", 2969 npgs, pmap_pages_stolen, cnt); 2970 } 2971 #endif 2972 2973 return va; 2974 } 2975 2976 /* 2977 * Find a chunk of memory with the right size and alignment.
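 * The chunk is carved directly out of the avail[] regions and its
 * physical address is returned.  With at_end set, the allocation is
 * taken from the tail of the highest available region instead.  The
 * bootstrap code below, for example, uses
 *	pmap_boot_find_memory(size, size, 0)
 * to obtain a naturally aligned PTEG hash table.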
2978 */ 2979 paddr_t 2980 pmap_boot_find_memory(psize_t size, psize_t alignment, int at_end) 2981 { 2982 struct mem_region *mp; 2983 paddr_t s, e; 2984 int i, j; 2985 2986 size = round_page(size); 2987 2988 DPRINTFN(BOOT, 2989 ("pmap_boot_find_memory: size=%#" _PRIxpa ", alignment=%#" _PRIxpa ", at_end=%d", 2990 size, alignment, at_end)); 2991 2992 if (alignment < PAGE_SIZE || (alignment & (alignment-1)) != 0) 2993 panic("pmap_boot_find_memory: invalid alignment %#" _PRIxpa, 2994 alignment); 2995 2996 if (at_end) { 2997 if (alignment != PAGE_SIZE) 2998 panic("pmap_boot_find_memory: invalid ending " 2999 "alignment %#" _PRIxpa, alignment); 3000 3001 for (mp = &avail[avail_cnt-1]; mp >= avail; mp--) { 3002 s = mp->start + mp->size - size; 3003 if (s >= mp->start && mp->size >= size) { 3004 DPRINTFN(BOOT,(": %#" _PRIxpa "\n", s)); 3005 DPRINTFN(BOOT, 3006 ("pmap_boot_find_memory: b-avail[%d] start " 3007 "%#" _PRIxpa " size %#" _PRIxpa "\n", mp - avail, 3008 mp->start, mp->size)); 3009 mp->size -= size; 3010 DPRINTFN(BOOT, 3011 ("pmap_boot_find_memory: a-avail[%d] start " 3012 "%#" _PRIxpa " size %#" _PRIxpa "\n", mp - avail, 3013 mp->start, mp->size)); 3014 return s; 3015 } 3016 } 3017 panic("pmap_boot_find_memory: no available memory"); 3018 } 3019 3020 for (mp = avail, i = 0; i < avail_cnt; i++, mp++) { 3021 s = (mp->start + alignment - 1) & ~(alignment-1); 3022 e = s + size; 3023 3024 /* 3025 * Is the calculated block entirely within the region? 3026 */ 3027 if (s < mp->start || e > mp->start + mp->size) 3028 continue; 3029 3030 DPRINTFN(BOOT,(": %#" _PRIxpa "\n", s)); 3031 if (s == mp->start) { 3032 /* 3033 * If the block starts at the beginning of the region, 3034 * adjust the size & start. (the region may now be 3035 * zero in length) 3036 */ 3037 DPRINTFN(BOOT, 3038 ("pmap_boot_find_memory: b-avail[%d] start " 3039 "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size)); 3040 mp->start += size; 3041 mp->size -= size; 3042 DPRINTFN(BOOT, 3043 ("pmap_boot_find_memory: a-avail[%d] start " 3044 "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size)); 3045 } else if (e == mp->start + mp->size) { 3046 /* 3047 * If the block ends at the end of the region, 3048 * adjust only the size. 3049 */ 3050 DPRINTFN(BOOT, 3051 ("pmap_boot_find_memory: b-avail[%d] start " 3052 "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size)); 3053 mp->size -= size; 3054 DPRINTFN(BOOT, 3055 ("pmap_boot_find_memory: a-avail[%d] start " 3056 "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size)); 3057 } else { 3058 /* 3059 * Block is in the middle of the region, so we 3060 * have to split it in two.
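 * The entries above avail[i] are shifted up one slot to make room for
 * a new region describing the remainder that follows the block.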
3061 */ 3062 for (j = avail_cnt; j > i + 1; j--) { 3063 avail[j] = avail[j-1]; 3064 } 3065 DPRINTFN(BOOT, 3066 ("pmap_boot_find_memory: b-avail[%d] start " 3067 "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size)); 3068 mp[1].start = e; 3069 mp[1].size = mp[0].start + mp[0].size - e; 3070 mp[0].size = s - mp[0].start; 3071 avail_cnt++; 3072 for (; i < avail_cnt; i++) { 3073 DPRINTFN(BOOT, 3074 ("pmap_boot_find_memory: a-avail[%d] " 3075 "start %#" _PRIxpa " size %#" _PRIxpa "\n", i, 3076 avail[i].start, avail[i].size)); 3077 } 3078 } 3079 KASSERT(s == (uintptr_t) s); 3080 return s; 3081 } 3082 panic("pmap_boot_find_memory: not enough memory for " 3083 "%#" _PRIxpa "/%#" _PRIxpa " allocation?", size, alignment); 3084 } 3085 3086 /* XXXSL: we dont have any BATs to do this, map in Segment 0 1:1 using page tables */ 3087 #if defined (PMAP_OEA64_BRIDGE) 3088 int 3089 pmap_setup_segment0_map(int use_large_pages, ...) 3090 { 3091 vaddr_t va; 3092 3093 register_t pte_lo = 0x0; 3094 int ptegidx = 0, i = 0; 3095 struct pte pte; 3096 va_list ap; 3097 3098 /* Coherent + Supervisor RW, no user access */ 3099 pte_lo = PTE_M; 3100 3101 /* XXXSL 3102 * Map in 1st segment 1:1, we'll be careful not to spill kernel entries later, 3103 * these have to take priority. 3104 */ 3105 for (va = 0x0; va < SEGMENT_LENGTH; va += 0x1000) { 3106 ptegidx = va_to_pteg(pmap_kernel(), va); 3107 pmap_pte_create(&pte, pmap_kernel(), va, va | pte_lo); 3108 i = pmap_pte_insert(ptegidx, &pte); 3109 } 3110 3111 va_start(ap, use_large_pages); 3112 while (1) { 3113 paddr_t pa; 3114 size_t size; 3115 3116 va = va_arg(ap, vaddr_t); 3117 3118 if (va == 0) 3119 break; 3120 3121 pa = va_arg(ap, paddr_t); 3122 size = va_arg(ap, size_t); 3123 3124 for (; va < (va + size); va += 0x1000, pa += 0x1000) { 3125 #if 0 3126 printf("%s: Inserting: va: %#" _PRIxva ", pa: %#" _PRIxpa "\n", __func__, va, pa); 3127 #endif 3128 ptegidx = va_to_pteg(pmap_kernel(), va); 3129 pmap_pte_create(&pte, pmap_kernel(), va, pa | pte_lo); 3130 i = pmap_pte_insert(ptegidx, &pte); 3131 } 3132 } 3133 3134 TLBSYNC(); 3135 SYNC(); 3136 return (0); 3137 } 3138 #endif /* PMAP_OEA64_BRIDGE */ 3139 3140 /* 3141 * This is not part of the defined PMAP interface and is specific to the 3142 * PowerPC architecture. This is called during initppc, before the system 3143 * is really initialized. 3144 */ 3145 void 3146 pmap_bootstrap(paddr_t kernelstart, paddr_t kernelend) 3147 { 3148 struct mem_region *mp, tmp; 3149 paddr_t s, e; 3150 psize_t size; 3151 int i, j; 3152 3153 /* 3154 * Get memory. 3155 */ 3156 mem_regions(&mem, &avail); 3157 #if defined(DEBUG) 3158 if (pmapdebug & PMAPDEBUG_BOOT) { 3159 printf("pmap_bootstrap: memory configuration:\n"); 3160 for (mp = mem; mp->size; mp++) { 3161 printf("pmap_bootstrap: mem start %#" _PRIxpa " size %#" _PRIxpa "\n", 3162 mp->start, mp->size); 3163 } 3164 for (mp = avail; mp->size; mp++) { 3165 printf("pmap_bootstrap: avail start %#" _PRIxpa " size %#" _PRIxpa "\n", 3166 mp->start, mp->size); 3167 } 3168 } 3169 #endif 3170 3171 /* 3172 * Find out how much physical memory we have and in how many chunks. 3173 */ 3174 for (mem_cnt = 0, mp = mem; mp->size; mp++) { 3175 if (mp->start >= pmap_memlimit) 3176 continue; 3177 if (mp->start + mp->size > pmap_memlimit) { 3178 size = pmap_memlimit - mp->start; 3179 physmem += btoc(size); 3180 } else { 3181 physmem += btoc(mp->size); 3182 } 3183 mem_cnt++; 3184 } 3185 3186 /* 3187 * Count the number of available entries. 
3188 */ 3189 for (avail_cnt = 0, mp = avail; mp->size; mp++) 3190 avail_cnt++; 3191 3192 /* 3193 * Page align all regions. 3194 */ 3195 kernelstart = trunc_page(kernelstart); 3196 kernelend = round_page(kernelend); 3197 for (mp = avail, i = 0; i < avail_cnt; i++, mp++) { 3198 s = round_page(mp->start); 3199 mp->size -= (s - mp->start); 3200 mp->size = trunc_page(mp->size); 3201 mp->start = s; 3202 e = mp->start + mp->size; 3203 3204 DPRINTFN(BOOT, 3205 ("pmap_bootstrap: b-avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n", 3206 i, mp->start, mp->size)); 3207 3208 /* 3209 * Don't allow the end to run beyond our artificial limit 3210 */ 3211 if (e > pmap_memlimit) 3212 e = pmap_memlimit; 3213 3214 /* 3215 * Is this region empty or strange? Skip it. 3216 */ 3217 if (e <= s) { 3218 mp->start = 0; 3219 mp->size = 0; 3220 continue; 3221 } 3222 3223 /* 3224 * Does this overlap the beginning of the kernel? 3225 * Does it extend past the end of the kernel? 3226 */ 3227 else if (s < kernelstart && e > kernelstart) { 3228 if (e > kernelend) { 3229 avail[avail_cnt].start = kernelend; 3230 avail[avail_cnt].size = e - kernelend; 3231 avail_cnt++; 3232 } 3233 mp->size = kernelstart - s; 3234 } 3235 /* 3236 * Check whether this region overlaps the end of the kernel. 3237 */ 3238 else if (s < kernelend && e > kernelend) { 3239 mp->start = kernelend; 3240 mp->size = e - kernelend; 3241 } 3242 /* 3243 * Check whether this region is completely inside the kernel. 3244 * Nuke it if it is. 3245 */ 3246 else if (s >= kernelstart && e <= kernelend) { 3247 mp->start = 0; 3248 mp->size = 0; 3249 } 3250 /* 3251 * If the user imposed a memory limit, enforce it. 3252 */ 3253 else if (s >= pmap_memlimit) { 3254 mp->start = -PAGE_SIZE; /* let's know why */ 3255 mp->size = 0; 3256 } 3257 else { 3258 mp->start = s; 3259 mp->size = e - s; 3260 } 3261 DPRINTFN(BOOT, 3262 ("pmap_bootstrap: a-avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n", 3263 i, mp->start, mp->size)); 3264 } 3265 3266 /* 3267 * Move (and uncount) all the null regions to the end. 3268 */ 3269 for (mp = avail, i = 0; i < avail_cnt; i++, mp++) { 3270 if (mp->size == 0) { 3271 tmp = avail[i]; 3272 avail[i] = avail[--avail_cnt]; 3273 avail[avail_cnt] = avail[i]; 3274 } 3275 } 3276 3277 /* 3278 * (Bubble)sort them into ascending order. 3279 */ 3280 for (i = 0; i < avail_cnt; i++) { 3281 for (j = i + 1; j < avail_cnt; j++) { 3282 if (avail[i].start > avail[j].start) { 3283 tmp = avail[i]; 3284 avail[i] = avail[j]; 3285 avail[j] = tmp; 3286 } 3287 } 3288 } 3289 3290 /* 3291 * Make sure they don't overlap. 3292 */ 3293 for (mp = avail, i = 0; i < avail_cnt - 1; i++, mp++) { 3294 if (mp[0].start + mp[0].size > mp[1].start) { 3295 mp[0].size = mp[1].start - mp[0].start; 3296 } 3297 DPRINTFN(BOOT, 3298 ("pmap_bootstrap: avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n", 3299 i, mp->start, mp->size)); 3300 } 3301 DPRINTFN(BOOT, 3302 ("pmap_bootstrap: avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n", 3303 i, mp->start, mp->size)); 3304 3305 #ifdef PTEGCOUNT 3306 pmap_pteg_cnt = PTEGCOUNT; 3307 #else /* PTEGCOUNT */ 3308 3309 pmap_pteg_cnt = 0x1000; 3310 3311 while (pmap_pteg_cnt < physmem) 3312 pmap_pteg_cnt <<= 1; 3313 3314 pmap_pteg_cnt >>= 1; 3315 #endif /* PTEGCOUNT */ 3316 3317 #ifdef DEBUG 3318 DPRINTFN(BOOT, 3319 ("pmap_pteg_cnt: 0x%x\n", pmap_pteg_cnt)); 3320 #endif 3321 3322 /* 3323 * Find suitably aligned memory for the PTEG hash table.
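 * The hash table must be aligned to a multiple of its own size, so the
 * size is passed as the required alignment as well.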
3324 */ 3325 size = pmap_pteg_cnt * sizeof(struct pteg); 3326 pmap_pteg_table = (void *)(uintptr_t) pmap_boot_find_memory(size, size, 0); 3327 3328 #ifdef DEBUG 3329 DPRINTFN(BOOT, 3330 ("PTEG cnt: 0x%x HTAB size: 0x%08x bytes, address: %p\n", pmap_pteg_cnt, (unsigned int)size, pmap_pteg_table)); 3331 #endif 3332 3333 3334 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 3335 if ( (uintptr_t) pmap_pteg_table + size > SEGMENT_LENGTH) 3336 panic("pmap_bootstrap: pmap_pteg_table end (%p + %#" _PRIxpa ") > 256MB", 3337 pmap_pteg_table, size); 3338 #endif 3339 3340 memset(__UNVOLATILE(pmap_pteg_table), 0, 3341 pmap_pteg_cnt * sizeof(struct pteg)); 3342 pmap_pteg_mask = pmap_pteg_cnt - 1; 3343 3344 /* 3345 * We cannot do pmap_steal_memory here since UVM hasn't been loaded 3346 * with pages. So we just steal them before giving them to UVM. 3347 */ 3348 size = sizeof(pmap_pvo_table[0]) * pmap_pteg_cnt; 3349 pmap_pvo_table = (void *)(uintptr_t) pmap_boot_find_memory(size, PAGE_SIZE, 0); 3350 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK) 3351 if ( (uintptr_t) pmap_pvo_table + size > SEGMENT_LENGTH) 3352 panic("pmap_bootstrap: pmap_pvo_table end (%p + %#" _PRIxpa ") > 256MB", 3353 pmap_pvo_table, size); 3354 #endif 3355 3356 for (i = 0; i < pmap_pteg_cnt; i++) 3357 TAILQ_INIT(&pmap_pvo_table[i]); 3358 3359 #ifndef MSGBUFADDR 3360 /* 3361 * Allocate msgbuf in high memory. 3362 */ 3363 msgbuf_paddr = pmap_boot_find_memory(MSGBUFSIZE, PAGE_SIZE, 1); 3364 #endif 3365 3366 for (mp = avail, i = 0; i < avail_cnt; mp++, i++) { 3367 paddr_t pfstart = atop(mp->start); 3368 paddr_t pfend = atop(mp->start + mp->size); 3369 if (mp->size == 0) 3370 continue; 3371 if (mp->start + mp->size <= SEGMENT_LENGTH) { 3372 uvm_page_physload(pfstart, pfend, pfstart, pfend, 3373 VM_FREELIST_FIRST256); 3374 } else if (mp->start >= SEGMENT_LENGTH) { 3375 uvm_page_physload(pfstart, pfend, pfstart, pfend, 3376 VM_FREELIST_DEFAULT); 3377 } else { 3378 pfend = atop(SEGMENT_LENGTH); 3379 uvm_page_physload(pfstart, pfend, pfstart, pfend, 3380 VM_FREELIST_FIRST256); 3381 pfstart = atop(SEGMENT_LENGTH); 3382 pfend = atop(mp->start + mp->size); 3383 uvm_page_physload(pfstart, pfend, pfstart, pfend, 3384 VM_FREELIST_DEFAULT); 3385 } 3386 } 3387 3388 /* 3389 * Make sure kernel vsid is allocated as well as VSID 0. 3390 */ 3391 pmap_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS-1)) / VSID_NBPW] 3392 |= 1 << (KERNEL_VSIDBITS % VSID_NBPW); 3393 pmap_vsid_bitmap[(PHYSMAP_VSIDBITS & (NPMAPS-1)) / VSID_NBPW] 3394 |= 1 << (PHYSMAP_VSIDBITS % VSID_NBPW); 3395 pmap_vsid_bitmap[0] |= 1; 3396 3397 /* 3398 * Initialize kernel pmap and hardware. 
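 * This sets up the kernel's 16 segment registers, points SDR1 at the
 * PTEG hash table and finally flushes any stale TLB entries.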
3399 */ 3400 3401 /* PMAP_OEA64_BRIDGE does support these instructions */ 3402 #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE) 3403 for (i = 0; i < 16; i++) { 3404 pmap_kernel()->pm_sr[i] = KERNELN_SEGMENT(i)|SR_PRKEY; 3405 __asm volatile ("mtsrin %0,%1" 3406 :: "r"(KERNELN_SEGMENT(i)|SR_PRKEY), "r"(i << ADDR_SR_SHFT)); 3407 } 3408 3409 pmap_kernel()->pm_sr[KERNEL_SR] = KERNEL_SEGMENT|SR_SUKEY|SR_PRKEY; 3410 __asm volatile ("mtsr %0,%1" 3411 :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT)); 3412 #ifdef KERNEL2_SR 3413 pmap_kernel()->pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT|SR_SUKEY|SR_PRKEY; 3414 __asm volatile ("mtsr %0,%1" 3415 :: "n"(KERNEL2_SR), "r"(KERNEL2_SEGMENT)); 3416 #endif 3417 #endif /* PMAP_OEA || PMAP_OEA64_BRIDGE */ 3418 #if defined (PMAP_OEA) 3419 for (i = 0; i < 16; i++) { 3420 if (iosrtable[i] & SR601_T) { 3421 pmap_kernel()->pm_sr[i] = iosrtable[i]; 3422 __asm volatile ("mtsrin %0,%1" 3423 :: "r"(iosrtable[i]), "r"(i << ADDR_SR_SHFT)); 3424 } 3425 } 3426 __asm volatile ("sync; mtsdr1 %0; isync" 3427 :: "r"((uintptr_t)pmap_pteg_table | (pmap_pteg_mask >> 10))); 3428 #elif defined (PMAP_OEA64) || defined (PMAP_OEA64_BRIDGE) 3429 __asm __volatile ("sync; mtsdr1 %0; isync" 3430 :: "r"((uintptr_t)pmap_pteg_table | (32 - cntlzw(pmap_pteg_mask >> 11)))); 3431 #endif 3432 tlbia(); 3433 3434 #ifdef ALTIVEC 3435 pmap_use_altivec = cpu_altivec; 3436 #endif 3437 3438 #ifdef DEBUG 3439 if (pmapdebug & PMAPDEBUG_BOOT) { 3440 u_int cnt; 3441 int bank; 3442 char pbuf[9]; 3443 for (cnt = 0, bank = 0; bank < vm_nphysseg; bank++) { 3444 cnt += VM_PHYSMEM_PTR(bank)->avail_end - VM_PHYSMEM_PTR(bank)->avail_start; 3445 printf("pmap_bootstrap: vm_physmem[%d]=%#" _PRIxpa "-%#" _PRIxpa "/%#" _PRIxpa "\n", 3446 bank, 3447 ptoa(VM_PHYSMEM_PTR(bank)->avail_start), 3448 ptoa(VM_PHYSMEM_PTR(bank)->avail_end), 3449 ptoa(VM_PHYSMEM_PTR(bank)->avail_end - VM_PHYSMEM_PTR(bank)->avail_start)); 3450 } 3451 format_bytes(pbuf, sizeof(pbuf), ptoa((u_int64_t) cnt)); 3452 printf("pmap_bootstrap: UVM memory = %s (%u pages)\n", 3453 pbuf, cnt); 3454 } 3455 #endif 3456 3457 pool_init(&pmap_upvo_pool, sizeof(struct pvo_entry), 3458 sizeof(struct pvo_entry), 0, 0, "pmap_upvopl", 3459 &pmap_pool_uallocator, IPL_VM); 3460 3461 pool_setlowat(&pmap_upvo_pool, 252); 3462 3463 pool_init(&pmap_pool, sizeof(struct pmap), 3464 sizeof(void *), 0, 0, "pmap_pl", &pmap_pool_uallocator, 3465 IPL_NONE); 3466 3467 #if defined(PMAP_NEED_MAPKERNEL) || 1 3468 { 3469 struct pmap *pm = pmap_kernel(); 3470 #if defined(PMAP_NEED_FULL_MAPKERNEL) 3471 extern int etext[], kernel_text[]; 3472 vaddr_t va, va_etext = (paddr_t) etext; 3473 #endif 3474 paddr_t pa, pa_end; 3475 register_t sr; 3476 struct pte pt; 3477 unsigned int ptegidx; 3478 int bank; 3479 3480 sr = PHYSMAPN_SEGMENT(0) | SR_SUKEY|SR_PRKEY; 3481 pm->pm_sr[0] = sr; 3482 3483 for (bank = 0; bank < vm_nphysseg; bank++) { 3484 pa_end = ptoa(VM_PHYSMEM_PTR(bank)->avail_end); 3485 pa = ptoa(VM_PHYSMEM_PTR(bank)->avail_start); 3486 for (; pa < pa_end; pa += PAGE_SIZE) { 3487 ptegidx = va_to_pteg(pm, pa); 3488 pmap_pte_create(&pt, pm, pa, pa | PTE_M|PTE_BW); 3489 pmap_pte_insert(ptegidx, &pt); 3490 } 3491 } 3492 3493 #if defined(PMAP_NEED_FULL_MAPKERNEL) 3494 va = (vaddr_t) kernel_text; 3495 3496 for (pa = kernelstart; va < va_etext; 3497 pa += PAGE_SIZE, va += PAGE_SIZE) { 3498 ptegidx = va_to_pteg(pm, va); 3499 pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BR); 3500 pmap_pte_insert(ptegidx, &pt); 3501 } 3502 3503 for (; pa < kernelend; 3504 pa += PAGE_SIZE, va += PAGE_SIZE) { 3505 ptegidx = 
va_to_pteg(pm, va); 3506 pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BW); 3507 pmap_pte_insert(ptegidx, &pt); 3508 } 3509 3510 for (va = 0, pa = 0; va < kernelstart; 3511 pa += PAGE_SIZE, va += PAGE_SIZE) { 3512 ptegidx = va_to_pteg(pm, va); 3513 if (va < 0x3000) 3514 pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BR); 3515 else 3516 pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BW); 3517 pmap_pte_insert(ptegidx, &pt); 3518 } 3519 for (va = kernelend, pa = kernelend; va < SEGMENT_LENGTH; 3520 pa += PAGE_SIZE, va += PAGE_SIZE) { 3521 ptegidx = va_to_pteg(pm, va); 3522 pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BW); 3523 pmap_pte_insert(ptegidx, &pt); 3524 } 3525 #endif 3526 3527 __asm volatile ("mtsrin %0,%1" 3528 :: "r"(sr), "r"(kernelstart)); 3529 } 3530 #endif 3531 } 3532