1 /* $NetBSD: pmap.c,v 1.8 2001/11/26 23:26:33 thorpej Exp $ */ 2 3 /* 4 * Copyright 2001 Wasabi Systems, Inc. 5 * All rights reserved. 6 * 7 * Written by Eduardo Horvath and Simon Burge for Wasabi Systems, Inc. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed for the NetBSD Project by 20 * Wasabi Systems, Inc. 21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 22 * or promote products derived from this software without specific prior 23 * written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 35 * POSSIBILITY OF SUCH DAMAGE. 36 */ 37 38 /* 39 * Copyright (C) 1995, 1996 Wolfgang Solfrank. 40 * Copyright (C) 1995, 1996 TooLs GmbH. 41 * All rights reserved. 42 * 43 * Redistribution and use in source and binary forms, with or without 44 * modification, are permitted provided that the following conditions 45 * are met: 46 * 1. Redistributions of source code must retain the above copyright 47 * notice, this list of conditions and the following disclaimer. 48 * 2. Redistributions in binary form must reproduce the above copyright 49 * notice, this list of conditions and the following disclaimer in the 50 * documentation and/or other materials provided with the distribution. 51 * 3. All advertising materials mentioning features or use of this software 52 * must display the following acknowledgement: 53 * This product includes software developed by TooLs GmbH. 54 * 4. The name of TooLs GmbH may not be used to endorse or promote products 55 * derived from this software without specific prior written permission. 56 * 57 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR 58 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 59 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
60 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 61 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 62 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 63 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 64 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 65 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 66 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 67 */ 68 69 #undef PPC_4XX_NOCACHE 70 71 #include <sys/param.h> 72 #include <sys/malloc.h> 73 #include <sys/proc.h> 74 #include <sys/user.h> 75 #include <sys/queue.h> 76 #include <sys/systm.h> 77 #include <sys/pool.h> 78 #include <sys/device.h> 79 80 #include <uvm/uvm.h> 81 82 #include <machine/pcb.h> 83 #include <machine/powerpc.h> 84 85 #include <powerpc/spr.h> 86 #include <powerpc/ibm4xx/tlb.h> 87 88 89 #define CACHE_LINE 32 90 91 /* 92 * kernmap is an array of PTEs large enough to map in 93 * 4GB. At 16KB/page it is 256K entries or 2MB. 94 */ 95 #define KERNMAP_SIZE ((0xffffffffU/NBPG)+1) 96 caddr_t kernmap; 97 98 #define MINCTX 2 99 #define NUMCTX 256 100 volatile struct pmap *ctxbusy[NUMCTX]; 101 102 #define TLBF_USED 0x1 103 #define TLBF_REF 0x2 104 #define TLBF_LOCKED 0x4 105 #define TLB_LOCKED(i) (tlb_info[(i)].ti_flags & TLBF_LOCKED) 106 typedef struct tlb_info_s { 107 char ti_flags; 108 char ti_ctx; /* TLB_PID assiciated with the entry */ 109 u_int ti_va; 110 } tlb_info_t; 111 112 volatile tlb_info_t tlb_info[NTLB]; 113 /* We'll use a modified FIFO replacement policy cause it's cheap */ 114 volatile int tlbnext = TLB_NRESERVED; 115 116 u_long dtlb_miss_count = 0; 117 u_long itlb_miss_count = 0; 118 u_long ktlb_miss_count = 0; 119 u_long utlb_miss_count = 0; 120 121 /* Event counters -- XXX type `INTR' so we can see them with vmstat -i */ 122 struct evcnt tlbmiss_ev = EVCNT_INITIALIZER(EVCNT_TYPE_INTR, 123 NULL, "cpu", "tlbmiss"); 124 struct evcnt tlbhit_ev = EVCNT_INITIALIZER(EVCNT_TYPE_INTR, 125 NULL, "cpu", "tlbhit"); 126 struct evcnt tlbflush_ev = EVCNT_INITIALIZER(EVCNT_TYPE_INTR, 127 NULL, "cpu", "tlbflush"); 128 struct evcnt tlbenter_ev = EVCNT_INITIALIZER(EVCNT_TYPE_INTR, 129 NULL, "cpu", "tlbenter"); 130 131 struct pmap kernel_pmap_; 132 133 int physmem; 134 static int npgs; 135 static u_int nextavail; 136 #ifndef MSGBUFADDR 137 extern paddr_t msgbuf_paddr; 138 #endif 139 140 static struct mem_region *mem, *avail; 141 142 /* 143 * This is a cache of referenced/modified bits. 144 * Bits herein are shifted by ATTRSHFT. 
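 *
 * One attribute byte is kept per managed page: pmap_init() carves
 * this array out of the same allocation as pv_table and points each
 * vm_physmem bank's pmseg.attrs at its slice.  pmap_enter() records
 * PTE_HI_REF/PTE_HI_CHG here for managed pages, and check_attr()
 * tests (and optionally clears) those bits for the ref/mod queries.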
145 */ 146 static char *pmap_attrib; 147 148 #define PV_WIRED 0x1 149 #define PV_WIRE(pv) ((pv)->pv_va |= PV_WIRED) 150 #define PV_CMPVA(va,pv) (!(((pv)->pv_va^(va))&(~PV_WIRED))) 151 152 struct pv_entry { 153 struct pv_entry *pv_next; /* Linked list of mappings */ 154 vaddr_t pv_va; /* virtual address of mapping */ 155 struct pmap *pv_pm; 156 }; 157 158 struct pv_entry *pv_table; 159 static struct pool pv_pool; 160 161 static int pmap_initialized; 162 163 static int ctx_flush(int); 164 165 static inline void dcache_flush_page(vaddr_t); 166 static inline void icache_flush_page(vaddr_t); 167 static inline void dcache_flush(vaddr_t, vsize_t); 168 static inline void icache_flush(vaddr_t, vsize_t); 169 170 inline struct pv_entry *pa_to_pv(paddr_t); 171 static inline char *pa_to_attr(paddr_t); 172 173 static inline volatile u_int *pte_find(struct pmap *, vaddr_t); 174 static inline int pte_enter(struct pmap *, vaddr_t, u_int); 175 176 static void pmap_pinit(pmap_t); 177 static void pmap_release(pmap_t); 178 static inline int pmap_enter_pv(struct pmap *, vaddr_t, paddr_t); 179 static void pmap_remove_pv(struct pmap *, vaddr_t, paddr_t); 180 181 /* 182 * These small routines may have to be replaced, 183 * if/when we support processors other that the 604. 184 */ 185 186 static inline void 187 dcache_flush_page(vaddr_t va) 188 { 189 int i; 190 191 for (i = 0; i < NBPG; i += CACHE_LINE) 192 asm volatile("dcbf %0,%1" : : "r" (va), "r" (i)); 193 asm volatile("sync;isync" : : ); 194 } 195 196 static inline void 197 icache_flush_page(vaddr_t va) 198 { 199 int i; 200 201 for (i = 0; i < NBPG; i += CACHE_LINE) 202 asm volatile("icbi %0,%1" : : "r" (va), "r" (i)); 203 asm volatile("sync;isync" : : ); 204 } 205 206 static inline void 207 dcache_flush(vaddr_t va, vsize_t len) 208 { 209 int i; 210 211 if (len == 0) 212 return; 213 214 /* Make sure we flush all cache lines */ 215 len += va & (CACHE_LINE-1); 216 for (i = 0; i < len; i += CACHE_LINE) 217 asm volatile("dcbf %0,%1" : : "r" (va), "r" (i)); 218 asm volatile("sync;isync" : : ); 219 } 220 221 static inline void 222 icache_flush(vaddr_t va, vsize_t len) 223 { 224 int i; 225 226 if (len == 0) 227 return; 228 229 /* Make sure we flush all cache lines */ 230 len += va & (CACHE_LINE-1); 231 for (i = 0; i < len; i += CACHE_LINE) 232 asm volatile("icbi %0,%1" : : "r" (va), "r" (i)); 233 asm volatile("sync;isync" : : ); 234 } 235 236 inline struct pv_entry * 237 pa_to_pv(paddr_t pa) 238 { 239 int bank, pg; 240 241 bank = vm_physseg_find(atop(pa), &pg); 242 if (bank == -1) 243 return NULL; 244 return &vm_physmem[bank].pmseg.pvent[pg]; 245 } 246 247 static inline char * 248 pa_to_attr(paddr_t pa) 249 { 250 int bank, pg; 251 252 bank = vm_physseg_find(atop(pa), &pg); 253 if (bank == -1) 254 return NULL; 255 return &vm_physmem[bank].pmseg.attrs[pg]; 256 } 257 258 /* 259 * Insert PTE into page table. 260 */ 261 int 262 pte_enter(struct pmap *pm, vaddr_t va, u_int pte) 263 { 264 int seg = STIDX(va); 265 int ptn = PTIDX(va); 266 paddr_t pa; 267 268 if (!pm->pm_ptbl[seg]) { 269 /* Don't allocate a page to clear a non-existent mapping. */ 270 if (!pte) return (1); 271 /* Allocate a page XXXX this will sleep! */ 272 pa = 0; 273 pm->pm_ptbl[seg] = (uint *)uvm_km_alloc1(kernel_map, NBPG, 1); 274 } 275 pm->pm_ptbl[seg][ptn] = pte; 276 277 /* Flush entry. */ 278 ppc4xx_tlb_flush(va, pm->pm_ctx); 279 return (1); 280 } 281 282 /* 283 * Get a pointer to a PTE in a page table. 
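 *
 * The table is two-level: STIDX(va) selects the segment slot in
 * pm_ptbl[] and PTIDX(va) indexes the page-table page hanging off
 * it.  When the segment has no page-table page yet this returns NULL
 * and callers such as pmap_remove() and pmap_tlbmiss() treat the
 * address as unmapped.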
284 */ 285 volatile u_int * 286 pte_find(struct pmap *pm, vaddr_t va) 287 { 288 int seg = STIDX(va); 289 int ptn = PTIDX(va); 290 291 if (pm->pm_ptbl[seg]) 292 return (&pm->pm_ptbl[seg][ptn]); 293 294 return (NULL); 295 } 296 297 /* 298 * This is called during initppc, before the system is really initialized. 299 */ 300 void 301 pmap_bootstrap(u_int kernelstart, u_int kernelend) 302 { 303 struct mem_region *mp, *mp1; 304 int cnt, i; 305 u_int s, e, sz; 306 307 /* 308 * Allocate the kernel page table at the end of 309 * kernel space so it's in the locked TTE. 310 */ 311 kernmap = (caddr_t)kernelend; 312 // kernelend += KERNMAP_SIZE*sizeof(struct pte); 313 314 /* 315 * Initialize kernel page table. 316 */ 317 // memset(kernmap, 0, KERNMAP_SIZE*sizeof(struct pte)); 318 for (i = 0; i < STSZ; i++) { 319 pmap_kernel()->pm_ptbl[i] = 0; // (u_int *)(kernmap + i*NBPG); 320 } 321 ctxbusy[0] = ctxbusy[1] = pmap_kernel(); 322 323 /* 324 * Announce page-size to the VM-system 325 */ 326 uvmexp.pagesize = NBPG; 327 uvm_setpagesize(); 328 329 /* 330 * Get memory. 331 */ 332 mem_regions(&mem, &avail); 333 for (mp = mem; mp->size; mp++) { 334 physmem += btoc(mp->size); 335 printf("+%lx,",mp->size); 336 } 337 printf("\n"); 338 ppc4xx_tlb_init(); 339 /* 340 * Count the number of available entries. 341 */ 342 for (cnt = 0, mp = avail; mp->size; mp++) 343 cnt++; 344 345 /* 346 * Page align all regions. 347 * Non-page aligned memory isn't very interesting to us. 348 * Also, sort the entries for ascending addresses. 349 */ 350 kernelstart &= ~PGOFSET; 351 kernelend = (kernelend + PGOFSET) & ~PGOFSET; 352 for (mp = avail; mp->size; mp++) { 353 s = mp->start; 354 e = mp->start + mp->size; 355 printf("%08x-%08x -> ",s,e); 356 /* 357 * Check whether this region holds all of the kernel. 358 */ 359 if (s < kernelstart && e > kernelend) { 360 avail[cnt].start = kernelend; 361 avail[cnt++].size = e - kernelend; 362 e = kernelstart; 363 } 364 /* 365 * Look whether this regions starts within the kernel. 366 */ 367 if (s >= kernelstart && s < kernelend) { 368 if (e <= kernelend) 369 goto empty; 370 s = kernelend; 371 } 372 /* 373 * Now look whether this region ends within the kernel. 374 */ 375 if (e > kernelstart && e <= kernelend) { 376 if (s >= kernelstart) 377 goto empty; 378 e = kernelstart; 379 } 380 /* 381 * Now page align the start and size of the region. 382 */ 383 s = round_page(s); 384 e = trunc_page(e); 385 if (e < s) 386 e = s; 387 sz = e - s; 388 printf("%08x-%08x = %x\n",s,e,sz); 389 /* 390 * Check whether some memory is left here. 391 */ 392 if (sz == 0) { 393 empty: 394 memmove(mp, mp + 1, 395 (cnt - (mp - avail)) * sizeof *mp); 396 cnt--; 397 mp--; 398 continue; 399 } 400 /* 401 * Do an insertion sort. 402 */ 403 npgs += btoc(sz); 404 for (mp1 = avail; mp1 < mp; mp1++) 405 if (s < mp1->start) 406 break; 407 if (mp1 < mp) { 408 memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1); 409 mp1->start = s; 410 mp1->size = sz; 411 } else { 412 mp->start = s; 413 mp->size = sz; 414 } 415 } 416 417 /* 418 * We cannot do pmap_steal_memory here, 419 * since we don't run with translation enabled yet. 
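 *
 * The message buffer is therefore carved directly off the end of a
 * sufficiently large avail region below, and that region is dropped
 * from the table if nothing is left of it.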
420 */ 421 #ifndef MSGBUFADDR 422 /* 423 * allow for msgbuf 424 */ 425 sz = round_page(MSGBUFSIZE); 426 mp = NULL; 427 for (mp1 = avail; mp1->size; mp1++) 428 if (mp1->size >= sz) 429 mp = mp1; 430 if (mp == NULL) 431 panic("not enough memory?"); 432 433 npgs -= btoc(sz); 434 msgbuf_paddr = mp->start + mp->size - sz; 435 mp->size -= sz; 436 if (mp->size <= 0) 437 memmove(mp, mp + 1, (cnt - (mp - avail)) * sizeof *mp); 438 #endif 439 440 printf("Loading pages\n"); 441 for (mp = avail; mp->size; mp++) 442 uvm_page_physload(atop(mp->start), atop(mp->start + mp->size), 443 atop(mp->start), atop(mp->start + mp->size), 444 VM_FREELIST_DEFAULT); 445 446 /* 447 * Initialize kernel pmap and hardware. 448 */ 449 /* Setup TLB pid allocator so it knows we alreadu using PID 1 */ 450 pmap_kernel()->pm_ctx = KERNEL_PID; 451 nextavail = avail->start; 452 453 454 evcnt_attach_static(&tlbhit_ev); 455 evcnt_attach_static(&tlbmiss_ev); 456 evcnt_attach_static(&tlbflush_ev); 457 evcnt_attach_static(&tlbenter_ev); 458 printf("Done\n"); 459 } 460 461 /* 462 * Restrict given range to physical memory 463 * 464 * (Used by /dev/mem) 465 */ 466 void 467 pmap_real_memory(paddr_t *start, psize_t *size) 468 { 469 struct mem_region *mp; 470 471 for (mp = mem; mp->size; mp++) { 472 if (*start + *size > mp->start && 473 *start < mp->start + mp->size) { 474 if (*start < mp->start) { 475 *size -= mp->start - *start; 476 *start = mp->start; 477 } 478 if (*start + *size > mp->start + mp->size) 479 *size = mp->start + mp->size - *start; 480 return; 481 } 482 } 483 *size = 0; 484 } 485 486 /* 487 * Initialize anything else for pmap handling. 488 * Called during vm_init(). 489 */ 490 void 491 pmap_init(void) 492 { 493 struct pv_entry *pv; 494 vsize_t sz; 495 vaddr_t addr; 496 int i, s; 497 int bank; 498 char *attr; 499 500 sz = (vsize_t)((sizeof(struct pv_entry) + 1) * npgs); 501 sz = round_page(sz); 502 addr = uvm_km_zalloc(kernel_map, sz); 503 s = splvm(); 504 pv = pv_table = (struct pv_entry *)addr; 505 for (i = npgs; --i >= 0;) 506 pv++->pv_pm = NULL; 507 pmap_attrib = (char *)pv; 508 memset(pv, 0, npgs); 509 510 pv = pv_table; 511 attr = pmap_attrib; 512 for (bank = 0; bank < vm_nphysseg; bank++) { 513 sz = vm_physmem[bank].end - vm_physmem[bank].start; 514 vm_physmem[bank].pmseg.pvent = pv; 515 vm_physmem[bank].pmseg.attrs = attr; 516 pv += sz; 517 attr += sz; 518 } 519 520 pmap_initialized = 1; 521 splx(s); 522 523 /* Setup a pool for additional pvlist structures */ 524 pool_init(&pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pv_entry", 0, 525 NULL, NULL, 0); 526 } 527 528 /* 529 * How much virtual space is available to the kernel? 530 */ 531 void 532 pmap_virtual_space(vaddr_t *start, vaddr_t *end) 533 { 534 535 #if 0 536 /* 537 * Reserve one segment for kernel virtual memory 538 */ 539 *start = (vaddr_t)(KERNEL_SR << ADDR_SR_SHFT); 540 *end = *start + SEGMENT_LENGTH; 541 #else 542 *start = (vaddr_t) VM_MIN_KERNEL_ADDRESS; 543 *end = (vaddr_t) VM_MAX_KERNEL_ADDRESS; 544 #endif 545 } 546 547 #ifdef PMAP_GROWKERNEL 548 /* 549 * Preallocate kernel page tables to a specified VA. 550 * This simply loops through the first TTE for each 551 * page table from the beginning of the kernel pmap, 552 * reads the entry, and if the result is 553 * zero (either invalid entry or no page table) it stores 554 * a zero there, populating page tables in the process. 555 * This is not the most efficient technique but i don't 556 * expect it to be called that often. 
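 *
 * kbreak tracks how far the kernel page tables have been populated;
 * each step allocates one page (from UVM once page_init_done, from
 * uvm_page_physget() before that), zeroes it and installs it as the
 * segment-table entry covering the next PTMAP bytes of KVA.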
557 */ 558 extern struct vm_page *vm_page_alloc1 __P((void)); 559 extern void vm_page_free1 __P((struct vm_page *)); 560 561 vaddr_t kbreak = VM_MIN_KERNEL_ADDRESS; 562 563 vaddr_t 564 pmap_growkernel(maxkvaddr) 565 vaddr_t maxkvaddr; 566 { 567 int s; 568 int seg; 569 paddr_t pg; 570 struct pmap *pm = pmap_kernel(); 571 572 s = splvm(); 573 574 /* Align with the start of a page table */ 575 for (kbreak &= ~(PTMAP-1); kbreak < maxkvaddr; 576 kbreak += PTMAP) { 577 seg = STIDX(kbreak); 578 579 if (pte_find(pm, kbreak)) continue; 580 581 if (uvm.page_init_done) { 582 pg = (paddr_t)VM_PAGE_TO_PHYS(vm_page_alloc1()); 583 } else { 584 if (!uvm_page_physget(&pg)) 585 panic("pmap_growkernel: no memory"); 586 } 587 if (!pg) panic("pmap_growkernel: no pages"); 588 pmap_zero_page((paddr_t)pg); 589 590 /* XXX This is based on all phymem being addressable */ 591 pm->pm_ptbl[seg] = (u_int *)pg; 592 } 593 splx(s); 594 return (kbreak); 595 } 596 597 /* 598 * vm_page_alloc1: 599 * 600 * Allocate and return a memory cell with no associated object. 601 */ 602 struct vm_page * 603 vm_page_alloc1() 604 { 605 struct vm_page *pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE); 606 if (pg) { 607 pg->wire_count = 1; /* no mappings yet */ 608 pg->flags &= ~PG_BUSY; /* never busy */ 609 } 610 return pg; 611 } 612 613 /* 614 * vm_page_free1: 615 * 616 * Returns the given page to the free list, 617 * disassociating it with any VM object. 618 * 619 * Object and page must be locked prior to entry. 620 */ 621 void 622 vm_page_free1(mem) 623 struct vm_page *mem; 624 { 625 if (mem->flags != (PG_CLEAN|PG_FAKE)) { 626 printf("Freeing invalid page %p\n", mem); 627 printf("pa = %llx\n", (unsigned long long)VM_PAGE_TO_PHYS(mem)); 628 Debugger(); 629 return; 630 } 631 mem->flags |= PG_BUSY; 632 mem->wire_count = 0; 633 uvm_pagefree(mem); 634 } 635 #endif 636 637 /* 638 * Create and return a physical map. 639 */ 640 struct pmap * 641 pmap_create(void) 642 { 643 struct pmap *pm; 644 645 pm = (struct pmap *)malloc(sizeof *pm, M_VMPMAP, M_WAITOK); 646 memset((caddr_t)pm, 0, sizeof *pm); 647 pmap_pinit(pm); 648 return pm; 649 } 650 651 /* 652 * Initialize a preallocated and zeroed pmap structure. 653 */ 654 void 655 pmap_pinit(struct pmap *pm) 656 { 657 int i; 658 659 /* 660 * Allocate some segment registers for this pmap. 661 */ 662 pm->pm_refs = 1; 663 for (i = 0; i < STSZ; i++) 664 pm->pm_ptbl[i] = NULL; 665 } 666 667 /* 668 * Add a reference to the given pmap. 669 */ 670 void 671 pmap_reference(struct pmap *pm) 672 { 673 674 pm->pm_refs++; 675 } 676 677 /* 678 * Retire the given pmap from service. 679 * Should only be called if the map contains no valid mappings. 680 */ 681 void 682 pmap_destroy(struct pmap *pm) 683 { 684 685 if (--pm->pm_refs == 0) { 686 pmap_release(pm); 687 free((caddr_t)pm, M_VMPMAP); 688 } 689 } 690 691 /* 692 * Release any resources held by the given physical map. 693 * Called when a pmap initialized by pmap_pinit is being released. 694 */ 695 static void 696 pmap_release(struct pmap *pm) 697 { 698 int i; 699 700 for (i = 0; i < STSZ; i++) 701 if (pm->pm_ptbl[i]) { 702 uvm_km_free(kernel_map, (vaddr_t)pm->pm_ptbl[i], NBPG); 703 pm->pm_ptbl[i] = NULL; 704 } 705 if (pm->pm_ctx) ctx_free(pm); 706 } 707 708 /* 709 * Copy the range specified by src_addr/len 710 * from the source map to the range dst_addr/len 711 * in the destination map. 712 * 713 * This routine is only advisory and need not do anything. 
714 */ 715 void 716 pmap_copy(struct pmap *dst_pmap, struct pmap *src_pmap, vaddr_t dst_addr, 717 vsize_t len, vaddr_t src_addr) 718 { 719 } 720 721 /* 722 * Require that all active physical maps contain no 723 * incorrect entries NOW. 724 */ 725 void 726 pmap_update(struct pmap *pmap) 727 { 728 } 729 730 /* 731 * Garbage collects the physical map system for 732 * pages which are no longer used. 733 * Success need not be guaranteed -- that is, there 734 * may well be pages which are not referenced, but 735 * others may be collected. 736 * Called by the pageout daemon when pages are scarce. 737 */ 738 void 739 pmap_collect(struct pmap *pm) 740 { 741 } 742 743 /* 744 * Fill the given physical page with zeroes. 745 */ 746 void 747 pmap_zero_page(paddr_t pa) 748 { 749 750 #ifdef PPC_4XX_NOCACHE 751 memset((caddr_t)pa, 0, NBPG); 752 #else 753 int i; 754 755 for (i = NBPG/CACHELINESIZE; i > 0; i--) { 756 __asm __volatile ("dcbz 0,%0" :: "r"(pa)); 757 pa += CACHELINESIZE; 758 } 759 #endif 760 } 761 762 /* 763 * Copy the given physical source page to its destination. 764 */ 765 void 766 pmap_copy_page(paddr_t src, paddr_t dst) 767 { 768 769 memcpy((caddr_t)dst, (caddr_t)src, NBPG); 770 dcache_flush_page(dst); 771 } 772 773 /* 774 * This returns whether this is the first mapping of a page. 775 */ 776 static inline int 777 pmap_enter_pv(struct pmap *pm, vaddr_t va, paddr_t pa) 778 { 779 struct pv_entry *pv, *npv = NULL; 780 int s; 781 782 if (!pmap_initialized) 783 return 0; 784 785 s = splvm(); 786 787 pv = pa_to_pv(pa); 788 if (!pv->pv_pm) { 789 /* 790 * No entries yet, use header as the first entry. 791 */ 792 pv->pv_va = va; 793 pv->pv_pm = pm; 794 pv->pv_next = NULL; 795 } else { 796 /* 797 * There is at least one other VA mapping this page. 798 * Place this entry after the header. 799 */ 800 npv = pool_get(&pv_pool, PR_WAITOK); 801 if (!npv) return (0); 802 npv->pv_va = va; 803 npv->pv_pm = pm; 804 npv->pv_next = pv->pv_next; 805 pv->pv_next = npv; 806 } 807 splx(s); 808 return (1); 809 } 810 811 static void 812 pmap_remove_pv(struct pmap *pm, vaddr_t va, paddr_t pa) 813 { 814 struct pv_entry *pv, *npv; 815 816 /* 817 * Remove from the PV table. 818 */ 819 pv = pa_to_pv(pa); 820 if (!pv) return; 821 822 /* 823 * If it is the first entry on the list, it is actually 824 * in the header and we must copy the following entry up 825 * to the header. Otherwise we must search the list for 826 * the entry. In either case we free the now unused entry. 827 */ 828 if (pm == pv->pv_pm && PV_CMPVA(va, pv)) { 829 if ((npv = pv->pv_next)) { 830 *pv = *npv; 831 pool_put(&pv_pool, npv); 832 } else 833 pv->pv_pm = NULL; 834 } else { 835 for (; (npv = pv->pv_next) != NULL; pv = npv) 836 if (pm == npv->pv_pm && PV_CMPVA(va, npv)) 837 break; 838 if (npv) { 839 pv->pv_next = npv->pv_next; 840 pool_put(&pv_pool, npv); 841 } 842 } 843 } 844 845 /* 846 * Insert physical page at pa into the given pmap at virtual address va. 847 */ 848 int 849 pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags) 850 { 851 int s; 852 u_int tte; 853 int managed; 854 855 /* 856 * Have to remove any existing mapping first. 857 */ 858 pmap_remove(pm, va, va + NBPG); 859 860 if (flags & PMAP_WIRED) flags |= prot; 861 862 /* If it has no protections don't bother w/the rest */ 863 if (!(flags & VM_PROT_ALL)) 864 return (0); 865 866 managed = 0; 867 if (vm_physseg_find(atop(pa), NULL) != -1) 868 managed = 1; 869 870 /* 871 * Generate TTE. 
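	 *
	 * The TTE packs the physical page number (TTE_PA), a size field
	 * (fixed at TTE_SZ_16K for now), a zone selector (ZONE_PRIV for
	 * the kernel pmap, ZONE_USER otherwise), cacheability bits
	 * (TTE_I|TTE_G for uncached I/O, TTE_W for writethrough) and
	 * TTE_WR for writable mappings.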
872 * 873 * XXXX 874 * 875 * Since the kernel does not handle execution privileges properly, 876 * we will handle read and execute permissions together. 877 */ 878 tte = TTE_PA(pa) | TTE_EX; 879 /* XXXX -- need to support multiple page sizes. */ 880 tte |= TTE_SZ_16K; 881 #ifdef DIAGNOSTIC 882 if ((flags & (PME_NOCACHE | PME_WRITETHROUG)) == 883 (PME_NOCACHE | PME_WRITETHROUG)) 884 panic("pmap_enter: uncached & writethrough\n"); 885 #endif 886 if (flags & PME_NOCACHE) 887 /* Must be I/O mapping */ 888 tte |= TTE_I | TTE_G; 889 #ifdef PPC_4XX_NOCACHE 890 tte |= TTE_I; 891 #else 892 else if (flags & PME_WRITETHROUG) 893 /* Uncached and writethrough are not compatible */ 894 tte |= TTE_W; 895 #endif 896 if (pm == pmap_kernel()) 897 tte |= TTE_ZONE(ZONE_PRIV); 898 else 899 tte |= TTE_ZONE(ZONE_USER); 900 901 if (flags & VM_PROT_WRITE) 902 tte |= TTE_WR; 903 904 /* 905 * Now record mapping for later back-translation. 906 */ 907 if (pmap_initialized && managed) { 908 char *attr; 909 910 if (!pmap_enter_pv(pm, va, pa)) { 911 /* Could not enter pv on a managed page */ 912 return 1; 913 } 914 915 /* Now set attributes. */ 916 attr = pa_to_attr(pa); 917 #ifdef DIAGNOSTIC 918 if (!attr) 919 panic("managed but no attr\n"); 920 #endif 921 if (flags & VM_PROT_ALL) 922 *attr |= PTE_HI_REF; 923 if (flags & VM_PROT_WRITE) 924 *attr |= PTE_HI_CHG; 925 } 926 927 s = splvm(); 928 pm->pm_stats.resident_count++; 929 930 /* Insert page into page table. */ 931 pte_enter(pm, va, tte); 932 933 /* If this is a real fault, enter it in the tlb */ 934 if (tte && ((flags & PMAP_WIRED) == 0)) { 935 ppc4xx_tlb_enter(pm->pm_ctx, va, tte); 936 } 937 splx(s); 938 939 /* Flush the real memory from the instruction cache. */ 940 if ((prot & VM_PROT_EXECUTE) && (tte & TTE_I) == 0) 941 __syncicache((void *)pa, PAGE_SIZE); 942 943 return 0; 944 } 945 946 void 947 pmap_unwire(struct pmap *pm, vaddr_t va) 948 { 949 struct pv_entry *pv, *npv; 950 paddr_t pa; 951 int s = splvm(); 952 953 if (pm == NULL) { 954 return; 955 } 956 957 if (!pmap_extract(pm, va, &pa)) { 958 return; 959 } 960 961 va |= PV_WIRED; 962 963 pv = pa_to_pv(pa); 964 if (!pv) return; 965 966 /* 967 * If it is the first entry on the list, it is actually 968 * in the header and we must copy the following entry up 969 * to the header. Otherwise we must search the list for 970 * the entry. In either case we free the now unused entry. 971 */ 972 for (npv = pv; (npv = pv->pv_next) != NULL; pv = npv) { 973 if (pm == npv->pv_pm && PV_CMPVA(va, npv)) { 974 npv->pv_va &= ~PV_WIRED; 975 break; 976 } 977 } 978 splx(s); 979 } 980 981 void 982 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot) 983 { 984 int s; 985 u_int tte; 986 struct pmap *pm = pmap_kernel(); 987 988 /* 989 * Have to remove any existing mapping first. 990 */ 991 992 /* 993 * Generate TTE. 994 * 995 * XXXX 996 * 997 * Since the kernel does not handle execution privileges properly, 998 * we will handle read and execute permissions together. 999 */ 1000 tte = 0; 1001 if (prot & VM_PROT_ALL) { 1002 1003 tte = TTE_PA(pa) | TTE_EX | TTE_ZONE(ZONE_PRIV); 1004 /* XXXX -- need to support multiple page sizes. 
*/ 1005 tte |= TTE_SZ_16K; 1006 #ifdef DIAGNOSTIC 1007 if ((prot & (PME_NOCACHE | PME_WRITETHROUG)) == 1008 (PME_NOCACHE | PME_WRITETHROUG)) 1009 panic("pmap_kenter_pa: uncached & writethrough\n"); 1010 #endif 1011 if (prot & PME_NOCACHE) 1012 /* Must be I/O mapping */ 1013 tte |= TTE_I | TTE_G; 1014 #ifdef PPC_4XX_NOCACHE 1015 tte |= TTE_I; 1016 #else 1017 else if (prot & PME_WRITETHROUG) 1018 /* Uncached and writethrough are not compatible */ 1019 tte |= TTE_W; 1020 #endif 1021 if (prot & VM_PROT_WRITE) 1022 tte |= TTE_WR; 1023 } 1024 1025 s = splvm(); 1026 pm->pm_stats.resident_count++; 1027 1028 /* Insert page into page table. */ 1029 pte_enter(pm, va, tte); 1030 splx(s); 1031 } 1032 1033 void 1034 pmap_kremove(vaddr_t va, vsize_t len) 1035 { 1036 1037 while (len > 0) { 1038 pte_enter(pmap_kernel(), va, 0); 1039 va += PAGE_SIZE; 1040 len -= PAGE_SIZE; 1041 } 1042 } 1043 1044 /* 1045 * Remove the given range of mapping entries. 1046 */ 1047 void 1048 pmap_remove(struct pmap *pm, vaddr_t va, vaddr_t endva) 1049 { 1050 int s; 1051 paddr_t pa; 1052 volatile u_int *ptp; 1053 1054 s = splvm(); 1055 while (va < endva) { 1056 1057 if ((ptp = pte_find(pm, va)) && (pa = *ptp)) { 1058 pa = TTE_PA(pa); 1059 pmap_remove_pv(pm, va, pa); 1060 *ptp = 0; 1061 ppc4xx_tlb_flush(va, pm->pm_ctx); 1062 pm->pm_stats.resident_count--; 1063 } 1064 va += NBPG; 1065 } 1066 1067 splx(s); 1068 } 1069 1070 /* 1071 * Get the physical page address for the given pmap/virtual address. 1072 */ 1073 boolean_t 1074 pmap_extract(struct pmap *pm, vaddr_t va, paddr_t *pap) 1075 { 1076 int seg = STIDX(va); 1077 int ptn = PTIDX(va); 1078 u_int pa = 0; 1079 int s = splvm(); 1080 1081 if (pm->pm_ptbl[seg] && (pa = pm->pm_ptbl[seg][ptn])) { 1082 *pap = TTE_PA(pa) | (va & PGOFSET); 1083 } 1084 splx(s); 1085 return (pa != 0); 1086 } 1087 1088 /* 1089 * Lower the protection on the specified range of this pmap. 1090 * 1091 * There are only two cases: either the protection is going to 0, 1092 * or it is going to read-only. 1093 */ 1094 void 1095 pmap_protect(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot) 1096 { 1097 volatile u_int *ptp; 1098 int s; 1099 1100 if (prot & VM_PROT_READ) { 1101 s = splvm(); 1102 while (sva < eva) { 1103 if ((ptp = pte_find(pm, sva)) != NULL) { 1104 *ptp &= ~TTE_WR; 1105 ppc4xx_tlb_flush(sva, pm->pm_ctx); 1106 } 1107 sva += NBPG; 1108 } 1109 splx(s); 1110 return; 1111 } 1112 pmap_remove(pm, sva, eva); 1113 } 1114 1115 boolean_t 1116 check_attr(struct vm_page *pg, u_int mask, int clear) 1117 { 1118 paddr_t pa = VM_PAGE_TO_PHYS(pg); 1119 int s; 1120 char *attr; 1121 int rv; 1122 1123 /* 1124 * First modify bits in cache. 1125 */ 1126 s = splvm(); 1127 attr = pa_to_attr(pa); 1128 if (attr == NULL) 1129 return FALSE; 1130 1131 rv = ((*attr & mask) != 0); 1132 if (clear) 1133 *attr &= ~mask; 1134 1135 splx(s); 1136 return rv; 1137 } 1138 1139 1140 /* 1141 * Lower the protection on the specified physical page. 1142 * 1143 * There are only two cases: either the protection is going to 0, 1144 * or it is going to read-only. 
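 *
 * Both cases walk the page's pv list and apply pmap_protect() to each
 * mapping: read-only clears TTE_WR in the PTE, while VM_PROT_NONE
 * removes the mapping altogether.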
1145 */ 1146 void 1147 pmap_page_protect(struct vm_page *pg, vm_prot_t prot) 1148 { 1149 paddr_t pa = VM_PAGE_TO_PHYS(pg); 1150 vaddr_t va; 1151 struct pv_entry *pvh, *pv, *npv; 1152 struct pmap *pm; 1153 1154 pvh = pa_to_pv(pa); 1155 if (pvh == NULL) 1156 return; 1157 1158 /* Handle extra pvs which may be deleted in the operation */ 1159 for (pv = pvh->pv_next; pv; pv = npv) { 1160 npv = pv->pv_next; 1161 1162 pm = pv->pv_pm; 1163 va = pv->pv_va; 1164 pmap_protect(pm, va, va+NBPG, prot); 1165 } 1166 /* Now check the head pv */ 1167 if (pvh->pv_pm) { 1168 pv = pvh; 1169 pm = pv->pv_pm; 1170 va = pv->pv_va; 1171 pmap_protect(pm, va, va+NBPG, prot); 1172 } 1173 } 1174 1175 /* 1176 * Activate the address space for the specified process. If the process 1177 * is the current process, load the new MMU context. 1178 */ 1179 void 1180 pmap_activate(struct proc *p) 1181 { 1182 #if 0 1183 struct pcb *pcb = &p->p_addr->u_pcb; 1184 pmap_t pmap = p->p_vmspace->vm_map.pmap; 1185 1186 /* 1187 * XXX Normally performed in cpu_fork(). 1188 */ 1189 printf("pmap_activate(%p), pmap=%p\n",p,pmap); 1190 if (pcb->pcb_pm != pmap) { 1191 pcb->pcb_pm = pmap; 1192 (void) pmap_extract(pmap_kernel(), (vaddr_t)pcb->pcb_pm, 1193 (paddr_t *)&pcb->pcb_pmreal); 1194 } 1195 1196 if (p == curproc) { 1197 /* Store pointer to new current pmap. */ 1198 curpm = pcb->pcb_pmreal; 1199 } 1200 #endif 1201 } 1202 1203 /* 1204 * Deactivate the specified process's address space. 1205 */ 1206 void 1207 pmap_deactivate(struct proc *p) 1208 { 1209 } 1210 1211 /* 1212 * Synchronize caches corresponding to [addr, addr+len) in p. 1213 */ 1214 void 1215 pmap_procwr(struct proc *p, vaddr_t va, size_t len) 1216 { 1217 struct pmap *pm = p->p_vmspace->vm_map.pmap; 1218 int msr, ctx, opid; 1219 1220 1221 /* 1222 * Need to turn off IMMU and switch to user context. 1223 * (icbi uses DMMU). 1224 */ 1225 if (!(ctx = pm->pm_ctx)) { 1226 /* No context -- assign it one */ 1227 ctx_alloc(pm); 1228 ctx = pm->pm_ctx; 1229 } 1230 __asm __volatile("mfmsr %0;" 1231 "li %1, 0x20;" 1232 "andc %1,%0,%1;" 1233 "mtmsr %1;" 1234 "sync;isync;" 1235 "mfpid %1;" 1236 "mtpid %2;" 1237 "sync; isync;" 1238 "1:" 1239 "dcbf 0,%3;" 1240 "icbi 0,%3;" 1241 "addi %3,%3,32;" 1242 "addic. %4,%4,-32;" 1243 "bge 1b;" 1244 "mtpid %1;" 1245 "mtmsr %0;" 1246 "sync; isync" 1247 : "=&r" (msr), "=&r" (opid) 1248 : "r" (ctx), "r" (va), "r" (len)); 1249 } 1250 1251 1252 /* This has to be done in real mode !!! */ 1253 void 1254 ppc4xx_tlb_flush(vaddr_t va, int pid) 1255 { 1256 u_long i, found; 1257 u_long msr; 1258 1259 /* If there's no context then it can't be mapped. */ 1260 if (!pid) return; 1261 1262 asm("mfpid %1;" /* Save PID */ 1263 "mfmsr %2;" /* Save MSR */ 1264 "li %0,0;" /* Now clear MSR */ 1265 "mtmsr %0;" 1266 "mtpid %4;" /* Set PID */ 1267 "sync;" 1268 "tlbsx. 
%0,0,%3;" /* Search TLB */ 1269 "sync;" 1270 "mtpid %1;" /* Restore PID */ 1271 "mtmsr %2;" /* Restore MSR */ 1272 "sync;isync;" 1273 "li %1,1;" 1274 "beq 1f;" 1275 "li %1,0;" 1276 "1:" 1277 : "=&r" (i), "=&r" (found), "=&r" (msr) 1278 : "r" (va), "r" (pid)); 1279 if (found && !TLB_LOCKED(i)) { 1280 1281 /* Now flush translation */ 1282 asm volatile( 1283 "tlbwe %0,%1,0;" 1284 "sync;isync;" 1285 : : "r" (0), "r" (i)); 1286 1287 tlb_info[i].ti_ctx = 0; 1288 tlb_info[i].ti_flags = 0; 1289 tlbnext = i; 1290 /* Successful flushes */ 1291 tlbflush_ev.ev_count++; 1292 } 1293 } 1294 1295 void 1296 ppc4xx_tlb_flush_all(void) 1297 { 1298 u_long i; 1299 1300 for (i = 0; i < NTLB; i++) 1301 if (!TLB_LOCKED(i)) { 1302 asm volatile( 1303 "tlbwe %0,%1,0;" 1304 "sync;isync;" 1305 : : "r" (0), "r" (i)); 1306 tlb_info[i].ti_ctx = 0; 1307 tlb_info[i].ti_flags = 0; 1308 } 1309 1310 asm volatile("sync;isync"); 1311 } 1312 1313 /* Find a TLB entry to evict. */ 1314 static int 1315 ppc4xx_tlb_find_victim(void) 1316 { 1317 int flags; 1318 1319 for (;;) { 1320 if (++tlbnext >= NTLB) 1321 tlbnext = TLB_NRESERVED; 1322 flags = tlb_info[tlbnext].ti_flags; 1323 if (!(flags & TLBF_USED) || 1324 (flags & (TLBF_LOCKED | TLBF_REF)) == 0) { 1325 u_long va, stack = (u_long)&va; 1326 1327 if (!((tlb_info[tlbnext].ti_va ^ stack) & (~PGOFSET)) && 1328 (tlb_info[tlbnext].ti_ctx == KERNEL_PID) && 1329 (flags & TLBF_USED)) { 1330 /* Kernel stack page */ 1331 flags |= TLBF_USED; 1332 tlb_info[tlbnext].ti_flags = flags; 1333 } else { 1334 /* Found it! */ 1335 return (tlbnext); 1336 } 1337 } else { 1338 tlb_info[tlbnext].ti_flags = (flags & ~TLBF_REF); 1339 } 1340 } 1341 } 1342 1343 void 1344 ppc4xx_tlb_enter(int ctx, vaddr_t va, u_int pte) 1345 { 1346 u_long th, tl, idx; 1347 tlbpid_t pid; 1348 u_short msr; 1349 int s; 1350 1351 tlbenter_ev.ev_count++; 1352 1353 th = (va & TLB_EPN_MASK) | 1354 (((pte & TTE_SZ_MASK) >> TTE_SZ_SHIFT) << TLB_SIZE_SHFT) | 1355 TLB_VALID; 1356 tl = pte & ~(TTE_SZ_MASK|TTE_ENDIAN); 1357 1358 s = splhigh(); 1359 idx = ppc4xx_tlb_find_victim(); 1360 1361 #ifdef DIAGNOSTIC 1362 if ((idx < TLB_NRESERVED) || (idx >= NTLB)) { 1363 panic("ppc4xx_tlb_enter: repacing entry %ld\n", idx); 1364 } 1365 #endif 1366 1367 tlb_info[idx].ti_va = (va & TLB_EPN_MASK); 1368 tlb_info[idx].ti_ctx = ctx; 1369 tlb_info[idx].ti_flags = TLBF_USED | TLBF_REF; 1370 1371 asm volatile( 1372 "mfmsr %0;" /* Save MSR */ 1373 "li %1,0;" 1374 "tlbwe %1,%3,0;" /* Invalidate old entry. */ 1375 "mtmsr %1;" /* Clear MSR */ 1376 "mfpid %1;" /* Save old PID */ 1377 "mtpid %2;" /* Load translation ctx */ 1378 "sync; isync;" 1379 #ifdef DEBUG 1380 "andi. 
%3,%3,63;" 1381 "tweqi %3,0;" /* XXXXX DEBUG trap on index 0 */ 1382 #endif 1383 "tlbwe %4,%3,1; tlbwe %5,%3,0;" /* Set TLB */ 1384 "sync; isync;" 1385 "mtpid %1; mtmsr %0;" /* Restore PID and MSR */ 1386 "sync; isync;" 1387 : "=&r" (msr), "=&r" (pid) 1388 : "r" (ctx), "r" (idx), "r" (tl), "r" (th)); 1389 splx(s); 1390 } 1391 1392 void 1393 ppc4xx_tlb_unpin(int i) 1394 { 1395 1396 if (i == -1) 1397 for (i = 0; i < TLB_NRESERVED; i++) 1398 tlb_info[i].ti_flags &= ~TLBF_LOCKED; 1399 else 1400 tlb_info[i].ti_flags &= ~TLBF_LOCKED; 1401 } 1402 1403 void 1404 ppc4xx_tlb_init(void) 1405 { 1406 int i; 1407 1408 /* Mark reserved TLB entries */ 1409 for (i = 0; i < TLB_NRESERVED; i++) { 1410 tlb_info[i].ti_flags = TLBF_LOCKED | TLBF_USED; 1411 tlb_info[i].ti_ctx = KERNEL_PID; 1412 } 1413 1414 /* Setup security zones */ 1415 /* Z0 - accessible by kernel only if TLB entry permissions allow 1416 * Z1,Z2 - access is controlled by TLB entry permissions 1417 * Z3 - full access regardless of TLB entry permissions 1418 */ 1419 1420 asm volatile( 1421 "mtspr %0,%1;" 1422 "sync;" 1423 :: "K"(SPR_ZPR), "r" (0x1b000000)); 1424 } 1425 1426 1427 /* 1428 * We should pass the ctx in from trap code. 1429 */ 1430 int 1431 pmap_tlbmiss(vaddr_t va, int ctx) 1432 { 1433 volatile u_int *pte; 1434 u_long tte; 1435 1436 tlbmiss_ev.ev_count++; 1437 1438 /* 1439 * XXXX We will reserve 0-0x80000000 for va==pa mappings. 1440 */ 1441 if (ctx != KERNEL_PID || (va & 0x80000000)) { 1442 pte = pte_find((struct pmap *)ctxbusy[ctx], va); 1443 if (pte == NULL) { 1444 /* Map unmanaged addresses directly for kernel access */ 1445 return 1; 1446 } 1447 tte = *pte; 1448 if (tte == 0) { 1449 return 1; 1450 } 1451 } else { 1452 /* Create a 16MB writeable mapping. */ 1453 #ifdef PPC_4XX_NOCACHE 1454 tte = TTE_PA(va) | TTE_ZONE(ZONE_PRIV) | TTE_SZ_16M | TTE_I | TTE_WR; 1455 #else 1456 tte = TTE_PA(va) | TTE_ZONE(ZONE_PRIV) | TTE_SZ_16M | TTE_WR; 1457 #endif 1458 } 1459 tlbhit_ev.ev_count++; 1460 ppc4xx_tlb_enter(ctx, va, tte); 1461 1462 return 0; 1463 } 1464 1465 /* 1466 * Flush all the entries matching a context from the TLB. 1467 */ 1468 static int 1469 ctx_flush(int cnum) 1470 { 1471 int i; 1472 1473 /* We gotta steal this context */ 1474 for (i = TLB_NRESERVED; i < NTLB; i++) { 1475 if (tlb_info[i].ti_ctx == cnum) { 1476 /* Can't steal ctx if it has a locked entry. */ 1477 if (TLB_LOCKED(i)) { 1478 #ifdef DIAGNOSTIC 1479 printf("ctx_flush: can't invalidate " 1480 "locked mapping %d " 1481 "for context %d\n", i, cnum); 1482 Debugger(); 1483 #endif 1484 return (1); 1485 } 1486 #ifdef DIAGNOSTIC 1487 if (i < TLB_NRESERVED) 1488 panic("TLB entry %d not locked\n", i); 1489 #endif 1490 /* Invalidate particular TLB entry regardless of locked status */ 1491 asm volatile("tlbwe %0,%1,0" : :"r"(0),"r"(i)); 1492 tlb_info[i].ti_flags = 0; 1493 } 1494 } 1495 return (0); 1496 } 1497 1498 /* 1499 * Allocate a context. If necessary, steal one from someone else. 1500 * 1501 * The new context is flushed from the TLB before returning. 1502 */ 1503 int 1504 ctx_alloc(struct pmap *pm) 1505 { 1506 int s, cnum; 1507 static int next = MINCTX; 1508 1509 if (pm == pmap_kernel()) { 1510 #ifdef DIAGNOSTIC 1511 printf("ctx_alloc: kernel pmap!\n"); 1512 #endif 1513 return (0); 1514 } 1515 s = splvm(); 1516 1517 /* Find a likely context. 
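	 * The search is round-robin starting at `next'; if every slot in
	 * MINCTX..NUMCTX is busy, an in-use context is stolen: ctx_flush()
	 * evicts its TLB entries first, and a victim that still holds a
	 * locked entry is skipped.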
*/ 1518 cnum = next; 1519 do { 1520 if ((++cnum) > NUMCTX) 1521 cnum = MINCTX; 1522 } while (ctxbusy[cnum] != NULL && cnum != next); 1523 1524 /* Now clean it out */ 1525 oops: 1526 if (cnum < MINCTX) 1527 cnum = MINCTX; /* Never steal ctx 0 or 1 */ 1528 if (ctx_flush(cnum)) { 1529 /* oops -- something's wired. */ 1530 if ((++cnum) > NUMCTX) 1531 cnum = MINCTX; 1532 goto oops; 1533 } 1534 1535 if (ctxbusy[cnum]) { 1536 #ifdef DEBUG 1537 /* We should identify this pmap and clear it */ 1538 printf("Warning: stealing context %d\n", cnum); 1539 #endif 1540 ctxbusy[cnum]->pm_ctx = 0; 1541 } 1542 ctxbusy[cnum] = pm; 1543 next = cnum; 1544 splx(s); 1545 pm->pm_ctx = cnum; 1546 1547 return cnum; 1548 } 1549 1550 /* 1551 * Give away a context. 1552 */ 1553 void 1554 ctx_free(struct pmap *pm) 1555 { 1556 int oldctx; 1557 1558 oldctx = pm->pm_ctx; 1559 1560 if (oldctx == 0) 1561 panic("ctx_free: freeing kernel context"); 1562 #ifdef DIAGNOSTIC 1563 if (ctxbusy[oldctx] == 0) 1564 printf("ctx_free: freeing free context %d\n", oldctx); 1565 if (ctxbusy[oldctx] != pm) { 1566 printf("ctx_free: freeing someone esle's context\n " 1567 "ctxbusy[%d] = %p, pm->pm_ctx = %p\n", 1568 oldctx, (void *)(u_long)ctxbusy[oldctx], pm); 1569 Debugger(); 1570 } 1571 #endif 1572 /* We should verify it has not been stolen and reallocated... */ 1573 ctxbusy[oldctx] = NULL; 1574 ctx_flush(oldctx); 1575 } 1576 1577 1578 #ifdef DEBUG 1579 /* 1580 * Test ref/modify handling. 1581 */ 1582 void pmap_testout __P((void)); 1583 void 1584 pmap_testout() 1585 { 1586 vaddr_t va; 1587 volatile int *loc; 1588 int val = 0; 1589 paddr_t pa; 1590 struct vm_page *pg; 1591 int ref, mod; 1592 1593 /* Allocate a page */ 1594 va = (vaddr_t)uvm_km_alloc1(kernel_map, NBPG, 1); 1595 loc = (int*)va; 1596 1597 pmap_extract(pmap_kernel(), va, &pa); 1598 pg = PHYS_TO_VM_PAGE(pa); 1599 pmap_unwire(pmap_kernel(), va); 1600 1601 pmap_remove(pmap_kernel(), va, va+1); 1602 pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0); 1603 pmap_update(pmap_kernel()); 1604 1605 /* Now clear reference and modify */ 1606 ref = pmap_clear_reference(pg); 1607 mod = pmap_clear_modify(pg); 1608 printf("Clearing page va %p pa %lx: ref %d, mod %d\n", 1609 (void *)(u_long)va, (long)pa, 1610 ref, mod); 1611 1612 /* Check it's properly cleared */ 1613 ref = pmap_is_referenced(pg); 1614 mod = pmap_is_modified(pg); 1615 printf("Checking cleared page: ref %d, mod %d\n", 1616 ref, mod); 1617 1618 /* Reference page */ 1619 val = *loc; 1620 1621 ref = pmap_is_referenced(pg); 1622 mod = pmap_is_modified(pg); 1623 printf("Referenced page: ref %d, mod %d val %x\n", 1624 ref, mod, val); 1625 1626 /* Now clear reference and modify */ 1627 ref = pmap_clear_reference(pg); 1628 mod = pmap_clear_modify(pg); 1629 printf("Clearing page va %p pa %lx: ref %d, mod %d\n", 1630 (void *)(u_long)va, (long)pa, 1631 ref, mod); 1632 1633 /* Modify page */ 1634 *loc = 1; 1635 1636 ref = pmap_is_referenced(pg); 1637 mod = pmap_is_modified(pg); 1638 printf("Modified page: ref %d, mod %d\n", 1639 ref, mod); 1640 1641 /* Now clear reference and modify */ 1642 ref = pmap_clear_reference(pg); 1643 mod = pmap_clear_modify(pg); 1644 printf("Clearing page va %p pa %lx: ref %d, mod %d\n", 1645 (void *)(u_long)va, (long)pa, 1646 ref, mod); 1647 1648 /* Check it's properly cleared */ 1649 ref = pmap_is_referenced(pg); 1650 mod = pmap_is_modified(pg); 1651 printf("Checking cleared page: ref %d, mod %d\n", 1652 ref, mod); 1653 1654 /* Modify page */ 1655 *loc = 1; 1656 1657 ref = pmap_is_referenced(pg); 1658 mod = 
pmap_is_modified(pg); 1659 printf("Modified page: ref %d, mod %d\n", 1660 ref, mod); 1661 1662 /* Check pmap_protect() */ 1663 pmap_protect(pmap_kernel(), va, va+1, VM_PROT_READ); 1664 pmap_update(pmap_kernel()); 1665 ref = pmap_is_referenced(pg); 1666 mod = pmap_is_modified(pg); 1667 printf("pmap_protect(VM_PROT_READ): ref %d, mod %d\n", 1668 ref, mod); 1669 1670 /* Now clear reference and modify */ 1671 ref = pmap_clear_reference(pg); 1672 mod = pmap_clear_modify(pg); 1673 printf("Clearing page va %p pa %lx: ref %d, mod %d\n", 1674 (void *)(u_long)va, (long)pa, 1675 ref, mod); 1676 1677 /* Reference page */ 1678 val = *loc; 1679 1680 ref = pmap_is_referenced(pg); 1681 mod = pmap_is_modified(pg); 1682 printf("Referenced page: ref %d, mod %d val %x\n", 1683 ref, mod, val); 1684 1685 /* Now clear reference and modify */ 1686 ref = pmap_clear_reference(pg); 1687 mod = pmap_clear_modify(pg); 1688 printf("Clearing page va %p pa %lx: ref %d, mod %d\n", 1689 (void *)(u_long)va, (long)pa, 1690 ref, mod); 1691 1692 /* Modify page */ 1693 #if 0 1694 pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0); 1695 pmap_update(pmap_kernel()); 1696 #endif 1697 *loc = 1; 1698 1699 ref = pmap_is_referenced(pg); 1700 mod = pmap_is_modified(pg); 1701 printf("Modified page: ref %d, mod %d\n", 1702 ref, mod); 1703 1704 /* Check pmap_protect() */ 1705 pmap_protect(pmap_kernel(), va, va+1, VM_PROT_NONE); 1706 pmap_update(pmap_kernel()); 1707 ref = pmap_is_referenced(pg); 1708 mod = pmap_is_modified(pg); 1709 printf("pmap_protect(): ref %d, mod %d\n", 1710 ref, mod); 1711 1712 /* Now clear reference and modify */ 1713 ref = pmap_clear_reference(pg); 1714 mod = pmap_clear_modify(pg); 1715 printf("Clearing page va %p pa %lx: ref %d, mod %d\n", 1716 (void *)(u_long)va, (long)pa, 1717 ref, mod); 1718 1719 /* Reference page */ 1720 val = *loc; 1721 1722 ref = pmap_is_referenced(pg); 1723 mod = pmap_is_modified(pg); 1724 printf("Referenced page: ref %d, mod %d val %x\n", 1725 ref, mod, val); 1726 1727 /* Now clear reference and modify */ 1728 ref = pmap_clear_reference(pg); 1729 mod = pmap_clear_modify(pg); 1730 printf("Clearing page va %p pa %lx: ref %d, mod %d\n", 1731 (void *)(u_long)va, (long)pa, 1732 ref, mod); 1733 1734 /* Modify page */ 1735 #if 0 1736 pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0); 1737 pmap_update(pmap_kernel()); 1738 #endif 1739 *loc = 1; 1740 1741 ref = pmap_is_referenced(pg); 1742 mod = pmap_is_modified(pg); 1743 printf("Modified page: ref %d, mod %d\n", 1744 ref, mod); 1745 1746 /* Check pmap_pag_protect() */ 1747 pmap_page_protect(pg, VM_PROT_READ); 1748 ref = pmap_is_referenced(pg); 1749 mod = pmap_is_modified(pg); 1750 printf("pmap_page_protect(VM_PROT_READ): ref %d, mod %d\n", 1751 ref, mod); 1752 1753 /* Now clear reference and modify */ 1754 ref = pmap_clear_reference(pg); 1755 mod = pmap_clear_modify(pg); 1756 printf("Clearing page va %p pa %lx: ref %d, mod %d\n", 1757 (void *)(u_long)va, (long)pa, 1758 ref, mod); 1759 1760 /* Reference page */ 1761 val = *loc; 1762 1763 ref = pmap_is_referenced(pg); 1764 mod = pmap_is_modified(pg); 1765 printf("Referenced page: ref %d, mod %d val %x\n", 1766 ref, mod, val); 1767 1768 /* Now clear reference and modify */ 1769 ref = pmap_clear_reference(pg); 1770 mod = pmap_clear_modify(pg); 1771 printf("Clearing page va %p pa %lx: ref %d, mod %d\n", 1772 (void *)(u_long)va, (long)pa, 1773 ref, mod); 1774 1775 /* Modify page */ 1776 #if 0 1777 pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0); 1778 pmap_update(pmap_kernel()); 1779 #endif 1780 *loc = 1; 
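	/* Dirty the page once more before revoking all access below. */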
1781 1782 ref = pmap_is_referenced(pg); 1783 mod = pmap_is_modified(pg); 1784 printf("Modified page: ref %d, mod %d\n", 1785 ref, mod); 1786 1787 /* Check pmap_pag_protect() */ 1788 pmap_page_protect(pg, VM_PROT_NONE); 1789 ref = pmap_is_referenced(pg); 1790 mod = pmap_is_modified(pg); 1791 printf("pmap_page_protect(): ref %d, mod %d\n", 1792 ref, mod); 1793 1794 /* Now clear reference and modify */ 1795 ref = pmap_clear_reference(pg); 1796 mod = pmap_clear_modify(pg); 1797 printf("Clearing page va %p pa %lx: ref %d, mod %d\n", 1798 (void *)(u_long)va, (long)pa, 1799 ref, mod); 1800 1801 1802 /* Reference page */ 1803 val = *loc; 1804 1805 ref = pmap_is_referenced(pg); 1806 mod = pmap_is_modified(pg); 1807 printf("Referenced page: ref %d, mod %d val %x\n", 1808 ref, mod, val); 1809 1810 /* Now clear reference and modify */ 1811 ref = pmap_clear_reference(pg); 1812 mod = pmap_clear_modify(pg); 1813 printf("Clearing page va %p pa %lx: ref %d, mod %d\n", 1814 (void *)(u_long)va, (long)pa, 1815 ref, mod); 1816 1817 /* Modify page */ 1818 #if 0 1819 pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0); 1820 pmap_update(pmap_kernel()); 1821 #endif 1822 *loc = 1; 1823 1824 ref = pmap_is_referenced(pg); 1825 mod = pmap_is_modified(pg); 1826 printf("Modified page: ref %d, mod %d\n", 1827 ref, mod); 1828 1829 /* Unmap page */ 1830 pmap_remove(pmap_kernel(), va, va+1); 1831 pmap_update(pmap_kernel()); 1832 ref = pmap_is_referenced(pg); 1833 mod = pmap_is_modified(pg); 1834 printf("Unmapped page: ref %d, mod %d\n", ref, mod); 1835 1836 /* Now clear reference and modify */ 1837 ref = pmap_clear_reference(pg); 1838 mod = pmap_clear_modify(pg); 1839 printf("Clearing page va %p pa %lx: ref %d, mod %d\n", 1840 (void *)(u_long)va, (long)pa, ref, mod); 1841 1842 /* Check it's properly cleared */ 1843 ref = pmap_is_referenced(pg); 1844 mod = pmap_is_modified(pg); 1845 printf("Checking cleared page: ref %d, mod %d\n", 1846 ref, mod); 1847 1848 pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 1849 VM_PROT_ALL|PMAP_WIRED); 1850 uvm_km_free(kernel_map, (vaddr_t)va, NBPG); 1851 } 1852 #endif 1853
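/*
 * Illustrative sketch, kept out of the build: one way a caller might
 * use the pmap_kenter_pa()/pmap_kremove() pair above to map a single
 * device page uncached.  The function name and its arguments are
 * hypothetical; only the pmap calls and the PME_NOCACHE flag are
 * taken from the code in this file.
 */
#if 0
static void
pmap_example_map_device(vaddr_t va, paddr_t pa)
{

	/* PME_NOCACHE makes pmap_kenter_pa() set TTE_I|TTE_G, so accesses bypass the dcache. */
	pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE | PME_NOCACHE);

	/* ... access the device page through va ... */

	/* Tear the mapping down again. */
	pmap_kremove(va, NBPG);
}
#endif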