1 /* $NetBSD: pmap.c,v 1.5 2001/09/11 04:35:43 eeh Exp $ */ 2 3 /* 4 * Copyright 2001 Wasabi Systems, Inc. 5 * All rights reserved. 6 * 7 * Written by Eduardo Horvath and Simon Burge for Wasabi Systems, Inc. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed for the NetBSD Project by 20 * Wasabi Systems, Inc. 21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 22 * or promote products derived from this software without specific prior 23 * written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 35 * POSSIBILITY OF SUCH DAMAGE. 36 */ 37 38 /* 39 * Copyright (C) 1995, 1996 Wolfgang Solfrank. 40 * Copyright (C) 1995, 1996 TooLs GmbH. 41 * All rights reserved. 42 * 43 * Redistribution and use in source and binary forms, with or without 44 * modification, are permitted provided that the following conditions 45 * are met: 46 * 1. Redistributions of source code must retain the above copyright 47 * notice, this list of conditions and the following disclaimer. 48 * 2. Redistributions in binary form must reproduce the above copyright 49 * notice, this list of conditions and the following disclaimer in the 50 * documentation and/or other materials provided with the distribution. 51 * 3. All advertising materials mentioning features or use of this software 52 * must display the following acknowledgement: 53 * This product includes software developed by TooLs GmbH. 54 * 4. The name of TooLs GmbH may not be used to endorse or promote products 55 * derived from this software without specific prior written permission. 56 * 57 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR 58 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 59 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#undef NOCACHE

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/device.h>

#include <uvm/uvm.h>

#include <machine/pcb.h>
#include <machine/powerpc.h>

#include <powerpc/spr.h>
#include <powerpc/ibm4xx/tlb.h>

#define CACHE_LINE	32

/*
 * kernmap is an array of PTEs large enough to map in
 * 4GB. At 16KB/page it is 256K entries or 2MB.
 */
#define KERNMAP_SIZE	((0xffffffffU/NBPG)+1)
caddr_t kernmap;

#define MINCTX		2
#define NUMCTX		256
volatile struct pmap *ctxbusy[NUMCTX];

#define TLBF_USED	0x1
#define TLBF_REF	0x2
#define TLBF_LOCKED	0x4
#define TLB_LOCKED(i)	(tlb_info[(i)].ti_flags & TLBF_LOCKED)
typedef struct tlb_info_s {
	char	ti_flags;
	char	ti_ctx;		/* TLB_PID associated with the entry */
	u_int	ti_va;
} tlb_info_t;

volatile tlb_info_t tlb_info[NTLB];
/* We'll use a modified FIFO replacement policy because it's cheap */
volatile int tlbnext = TLB_NRESERVED;

u_long dtlb_miss_count = 0;
u_long itlb_miss_count = 0;
u_long ktlb_miss_count = 0;
u_long utlb_miss_count = 0;

/* Event counters -- XXX type `INTR' so we can see them with vmstat -i */
struct evcnt tlbmiss_ev = EVCNT_INITIALIZER(EVCNT_TYPE_INTR,
	NULL, "cpu", "tlbmiss");
struct evcnt tlbhit_ev = EVCNT_INITIALIZER(EVCNT_TYPE_INTR,
	NULL, "cpu", "tlbhit");
struct evcnt tlbflush_ev = EVCNT_INITIALIZER(EVCNT_TYPE_INTR,
	NULL, "cpu", "tlbflush");
struct evcnt tlbenter_ev = EVCNT_INITIALIZER(EVCNT_TYPE_INTR,
	NULL, "cpu", "tlbenter");

struct pmap kernel_pmap_;

int physmem;
static int npgs;
static u_int nextavail;
#ifndef MSGBUFADDR
extern paddr_t msgbuf_paddr;
#endif

static struct mem_region *mem, *avail;

/*
 * This is a cache of referenced/modified bits.
 * Bits herein are shifted by ATTRSHFT.
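 * The attribute bytes are what check_attr() below reads and clears,
 * and pmap_enter() seeds them with PTE_HI_REF/PTE_HI_CHG.
 */
/*
 * Illustrative sketch only (kept under #if 0, not part of this pmap):
 * how a referenced-bit query can be answered from the cached attribute
 * byte.  pa_to_attr() and PTE_HI_REF are the names this file already
 * uses; the helper itself is hypothetical.
 */
#if 0
static int
example_page_is_referenced(paddr_t pa)
{
	char *attr = pa_to_attr(pa);	/* NULL for unmanaged memory */

	return (attr != NULL && (*attr & PTE_HI_REF) != 0);
}
#endif
/*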
145 */ 146 static char *pmap_attrib; 147 148 #define PV_WIRED 0x1 149 #define PV_WIRE(pv) ((pv)->pv_va |= PV_WIRED) 150 #define PV_CMPVA(va,pv) (!(((pv)->pv_va^(va))&(~PV_WIRED))) 151 152 struct pv_entry { 153 struct pv_entry *pv_next; /* Linked list of mappings */ 154 vaddr_t pv_va; /* virtual address of mapping */ 155 struct pmap *pv_pm; 156 }; 157 158 struct pv_entry *pv_table; 159 static struct pool pv_pool; 160 161 static int pmap_initialized; 162 163 static int ctx_flush(int); 164 165 static inline void dcache_flush_page(vaddr_t); 166 static inline void icache_flush_page(vaddr_t); 167 static inline void dcache_flush(vaddr_t, vsize_t); 168 static inline void icache_flush(vaddr_t, vsize_t); 169 170 inline struct pv_entry *pa_to_pv(paddr_t); 171 static inline char *pa_to_attr(paddr_t); 172 173 static inline volatile u_int *pte_find(struct pmap *, vaddr_t); 174 static inline int pte_enter(struct pmap *, vaddr_t, u_int); 175 176 static void pmap_pinit(pmap_t); 177 static void pmap_release(pmap_t); 178 static inline int pmap_enter_pv(struct pmap *, vaddr_t, paddr_t); 179 static void pmap_remove_pv(struct pmap *, vaddr_t, paddr_t); 180 181 /* 182 * These small routines may have to be replaced, 183 * if/when we support processors other that the 604. 184 */ 185 186 static inline void 187 dcache_flush_page(vaddr_t va) 188 { 189 int i; 190 191 for (i = 0; i < NBPG; i += CACHE_LINE) 192 asm volatile("dcbf %0,%1" : : "r" (va), "r" (i)); 193 asm volatile("sync;isync" : : ); 194 } 195 196 static inline void 197 icache_flush_page(vaddr_t va) 198 { 199 int i; 200 201 for (i = 0; i < NBPG; i += CACHE_LINE) 202 asm volatile("icbi %0,%1" : : "r" (va), "r" (i)); 203 asm volatile("sync;isync" : : ); 204 } 205 206 static inline void 207 dcache_flush(vaddr_t va, vsize_t len) 208 { 209 int i; 210 211 if (len == 0) 212 return; 213 214 /* Make sure we flush all cache lines */ 215 len += va & (CACHE_LINE-1); 216 for (i = 0; i < len; i += CACHE_LINE) 217 asm volatile("dcbf %0,%1" : : "r" (va), "r" (i)); 218 asm volatile("sync;isync" : : ); 219 } 220 221 static inline void 222 icache_flush(vaddr_t va, vsize_t len) 223 { 224 int i; 225 226 if (len == 0) 227 return; 228 229 /* Make sure we flush all cache lines */ 230 len += va & (CACHE_LINE-1); 231 for (i = 0; i < len; i += CACHE_LINE) 232 asm volatile("icbi %0,%1" : : "r" (va), "r" (i)); 233 asm volatile("sync;isync" : : ); 234 } 235 236 inline struct pv_entry * 237 pa_to_pv(paddr_t pa) 238 { 239 int bank, pg; 240 241 bank = vm_physseg_find(atop(pa), &pg); 242 if (bank == -1) 243 return NULL; 244 return &vm_physmem[bank].pmseg.pvent[pg]; 245 } 246 247 static inline char * 248 pa_to_attr(paddr_t pa) 249 { 250 int bank, pg; 251 252 bank = vm_physseg_find(atop(pa), &pg); 253 if (bank == -1) 254 return NULL; 255 return &vm_physmem[bank].pmseg.attrs[pg]; 256 } 257 258 /* 259 * Insert PTE into page table. 260 */ 261 int 262 pte_enter(struct pmap *pm, vaddr_t va, u_int pte) 263 { 264 int seg = STIDX(va); 265 int ptn = PTIDX(va); 266 paddr_t pa; 267 268 if (!pm->pm_ptbl[seg]) { 269 /* Don't allocate a page to clear a non-existent mapping. */ 270 if (!pte) return (1); 271 /* Allocate a page XXXX this will sleep! */ 272 pa = 0; 273 pm->pm_ptbl[seg] = (uint *)uvm_km_alloc1(kernel_map, NBPG, 1); 274 } 275 pm->pm_ptbl[seg][ptn] = pte; 276 277 /* Flush entry. */ 278 ppc4xx_tlb_flush(va, pm->pm_ctx); 279 return (1); 280 } 281 282 /* 283 * Get a pointer to a PTE in a page table. 
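 * The lookup is a simple two-level walk: pm_ptbl[] holds STSZ pointers
 * to page-sized arrays of 32-bit TTEs, indexed by the segment and PTE
 * indices of the VA.
 */
/*
 * Illustrative sketch only (kept under #if 0): the same two-step walk
 * pte_find() performs below, written out as a hypothetical helper.
 * STIDX() and PTIDX() are the macros this file already uses.
 */
#if 0
static u_int
example_pte_lookup(struct pmap *pm, vaddr_t va)
{
	u_int *seg = pm->pm_ptbl[STIDX(va)];		/* first level */

	return (seg != NULL ? seg[PTIDX(va)] : 0);	/* second level */
}
#endif
/*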
284 */ 285 volatile u_int * 286 pte_find(struct pmap *pm, vaddr_t va) 287 { 288 int seg = STIDX(va); 289 int ptn = PTIDX(va); 290 291 if (pm->pm_ptbl[seg]) 292 return (&pm->pm_ptbl[seg][ptn]); 293 294 return (NULL); 295 } 296 297 /* 298 * This is called during initppc, before the system is really initialized. 299 */ 300 void 301 pmap_bootstrap(u_int kernelstart, u_int kernelend) 302 { 303 struct mem_region *mp, *mp1; 304 int cnt, i; 305 u_int s, e, sz; 306 307 /* 308 * Allocate the kernel page table at the end of 309 * kernel space so it's in the locked TTE. 310 */ 311 kernmap = (caddr_t)kernelend; 312 // kernelend += KERNMAP_SIZE*sizeof(struct pte); 313 314 /* 315 * Initialize kernel page table. 316 */ 317 // memset(kernmap, 0, KERNMAP_SIZE*sizeof(struct pte)); 318 for (i = 0; i < STSZ; i++) { 319 pmap_kernel()->pm_ptbl[i] = 0; // (u_int *)(kernmap + i*NBPG); 320 } 321 ctxbusy[0] = ctxbusy[1] = pmap_kernel(); 322 323 /* 324 * Announce page-size to the VM-system 325 */ 326 uvmexp.pagesize = NBPG; 327 uvm_setpagesize(); 328 329 /* 330 * Get memory. 331 */ 332 mem_regions(&mem, &avail); 333 for (mp = mem; mp->size; mp++) { 334 physmem += btoc(mp->size); 335 printf("+%lx,",mp->size); 336 } 337 printf("\n"); 338 ppc4xx_tlb_init(); 339 /* 340 * Count the number of available entries. 341 */ 342 for (cnt = 0, mp = avail; mp->size; mp++) 343 cnt++; 344 345 /* 346 * Page align all regions. 347 * Non-page aligned memory isn't very interesting to us. 348 * Also, sort the entries for ascending addresses. 349 */ 350 kernelstart &= ~PGOFSET; 351 kernelend = (kernelend + PGOFSET) & ~PGOFSET; 352 for (mp = avail; mp->size; mp++) { 353 s = mp->start; 354 e = mp->start + mp->size; 355 printf("%08x-%08x -> ",s,e); 356 /* 357 * Check whether this region holds all of the kernel. 358 */ 359 if (s < kernelstart && e > kernelend) { 360 avail[cnt].start = kernelend; 361 avail[cnt++].size = e - kernelend; 362 e = kernelstart; 363 } 364 /* 365 * Look whether this regions starts within the kernel. 366 */ 367 if (s >= kernelstart && s < kernelend) { 368 if (e <= kernelend) 369 goto empty; 370 s = kernelend; 371 } 372 /* 373 * Now look whether this region ends within the kernel. 374 */ 375 if (e > kernelstart && e <= kernelend) { 376 if (s >= kernelstart) 377 goto empty; 378 e = kernelstart; 379 } 380 /* 381 * Now page align the start and size of the region. 382 */ 383 s = round_page(s); 384 e = trunc_page(e); 385 if (e < s) 386 e = s; 387 sz = e - s; 388 printf("%08x-%08x = %x\n",s,e,sz); 389 /* 390 * Check whether some memory is left here. 391 */ 392 if (sz == 0) { 393 empty: 394 memmove(mp, mp + 1, 395 (cnt - (mp - avail)) * sizeof *mp); 396 cnt--; 397 mp--; 398 continue; 399 } 400 /* 401 * Do an insertion sort. 402 */ 403 npgs += btoc(sz); 404 for (mp1 = avail; mp1 < mp; mp1++) 405 if (s < mp1->start) 406 break; 407 if (mp1 < mp) { 408 memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1); 409 mp1->start = s; 410 mp1->size = sz; 411 } else { 412 mp->start = s; 413 mp->size = sz; 414 } 415 } 416 417 /* 418 * We cannot do pmap_steal_memory here, 419 * since we don't run with translation enabled yet. 
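 *
 * (Worked example of the clipping above, with made-up numbers: for
 *  kernelstart = 0x10000 and kernelend = 0x90000, an avail region
 *  spanning 0x0-0x200000 is split into 0x0-0x10000 and
 *  0x90000-0x200000, the kernel's own pages are left out, and the
 *  remaining pieces are kept sorted by start address.)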
420 */ 421 #ifndef MSGBUFADDR 422 /* 423 * allow for msgbuf 424 */ 425 sz = round_page(MSGBUFSIZE); 426 mp = NULL; 427 for (mp1 = avail; mp1->size; mp1++) 428 if (mp1->size >= sz) 429 mp = mp1; 430 if (mp == NULL) 431 panic("not enough memory?"); 432 433 npgs -= btoc(sz); 434 msgbuf_paddr = mp->start + mp->size - sz; 435 mp->size -= sz; 436 if (mp->size <= 0) 437 memmove(mp, mp + 1, (cnt - (mp - avail)) * sizeof *mp); 438 #endif 439 440 printf("Loading pages\n"); 441 for (mp = avail; mp->size; mp++) 442 uvm_page_physload(atop(mp->start), atop(mp->start + mp->size), 443 atop(mp->start), atop(mp->start + mp->size), 444 VM_FREELIST_DEFAULT); 445 446 /* 447 * Initialize kernel pmap and hardware. 448 */ 449 /* Setup TLB pid allocator so it knows we alreadu using PID 1 */ 450 pmap_kernel()->pm_ctx = KERNEL_PID; 451 nextavail = avail->start; 452 453 454 evcnt_attach_static(&tlbhit_ev); 455 evcnt_attach_static(&tlbmiss_ev); 456 evcnt_attach_static(&tlbflush_ev); 457 evcnt_attach_static(&tlbenter_ev); 458 printf("Done\n"); 459 } 460 461 /* 462 * Restrict given range to physical memory 463 * 464 * (Used by /dev/mem) 465 */ 466 void 467 pmap_real_memory(paddr_t *start, psize_t *size) 468 { 469 struct mem_region *mp; 470 471 for (mp = mem; mp->size; mp++) { 472 if (*start + *size > mp->start && 473 *start < mp->start + mp->size) { 474 if (*start < mp->start) { 475 *size -= mp->start - *start; 476 *start = mp->start; 477 } 478 if (*start + *size > mp->start + mp->size) 479 *size = mp->start + mp->size - *start; 480 return; 481 } 482 } 483 *size = 0; 484 } 485 486 /* 487 * Initialize anything else for pmap handling. 488 * Called during vm_init(). 489 */ 490 void 491 pmap_init(void) 492 { 493 struct pv_entry *pv; 494 vsize_t sz; 495 vaddr_t addr; 496 int i, s; 497 int bank; 498 char *attr; 499 500 sz = (vsize_t)((sizeof(struct pv_entry) + 1) * npgs); 501 sz = round_page(sz); 502 addr = uvm_km_zalloc(kernel_map, sz); 503 s = splvm(); 504 pv = pv_table = (struct pv_entry *)addr; 505 for (i = npgs; --i >= 0;) 506 pv++->pv_pm = NULL; 507 pmap_attrib = (char *)pv; 508 memset(pv, 0, npgs); 509 510 pv = pv_table; 511 attr = pmap_attrib; 512 for (bank = 0; bank < vm_nphysseg; bank++) { 513 sz = vm_physmem[bank].end - vm_physmem[bank].start; 514 vm_physmem[bank].pmseg.pvent = pv; 515 vm_physmem[bank].pmseg.attrs = attr; 516 pv += sz; 517 attr += sz; 518 } 519 520 pmap_initialized = 1; 521 splx(s); 522 523 /* Setup a pool for additional pvlist structures */ 524 pool_init(&pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pv_entry", 0, 525 NULL, NULL, 0); 526 } 527 528 /* 529 * How much virtual space is available to the kernel? 530 */ 531 void 532 pmap_virtual_space(vaddr_t *start, vaddr_t *end) 533 { 534 535 #if 0 536 /* 537 * Reserve one segment for kernel virtual memory 538 */ 539 *start = (vaddr_t)(KERNEL_SR << ADDR_SR_SHFT); 540 *end = *start + SEGMENT_LENGTH; 541 #else 542 *start = (vaddr_t) VM_MIN_KERNEL_ADDRESS; 543 *end = (vaddr_t) VM_MAX_KERNEL_ADDRESS; 544 #endif 545 } 546 547 #ifdef PMAP_GROWKERNEL 548 /* 549 * Preallocate kernel page tables to a specified VA. 550 * This simply loops through the first TTE for each 551 * page table from the beginning of the kernel pmap, 552 * reads the entry, and if the result is 553 * zero (either invalid entry or no page table) it stores 554 * a zero there, populating page tables in the process. 555 * This is not the most efficient technique but i don't 556 * expect it to be called that often. 
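 *
 * Each pass of the loop below covers PTMAP bytes of KVA with one
 * page-table page, so growing from kbreak to maxkvaddr touches roughly
 * (maxkvaddr - kbreak) / PTMAP page tables.
 */
/*
 * Hypothetical usage sketch (kept under #if 0): a caller that has just
 * extended the kernel map would do something like this; new_kva_end is
 * a made-up variable standing for the highest address it needs.
 */
#if 0
	if (new_kva_end > kbreak)
		(void)pmap_growkernel(new_kva_end);
#endif
/*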
557 */ 558 extern struct vm_page *vm_page_alloc1 __P((void)); 559 extern void vm_page_free1 __P((struct vm_page *)); 560 561 vaddr_t kbreak = VM_MIN_KERNEL_ADDRESS; 562 563 vaddr_t 564 pmap_growkernel(maxkvaddr) 565 vaddr_t maxkvaddr; 566 { 567 int s; 568 int seg; 569 paddr_t pg; 570 struct pmap *pm = pmap_kernel(); 571 572 s = splvm(); 573 574 /* Align with the start of a page table */ 575 for (kbreak &= ~(PTMAP-1); kbreak < maxkvaddr; 576 kbreak += PTMAP) { 577 seg = STIDX(kbreak); 578 579 if (pte_find(pm, kbreak)) continue; 580 581 if (uvm.page_init_done) { 582 pg = (paddr_t)VM_PAGE_TO_PHYS(vm_page_alloc1()); 583 } else { 584 if (!uvm_page_physget(&pg)) 585 panic("pmap_growkernel: no memory"); 586 } 587 if (!pg) panic("pmap_growkernel: no pages"); 588 pmap_zero_page((paddr_t)pg); 589 590 /* XXX This is based on all phymem being addressable */ 591 pm->pm_ptbl[seg] = (u_int *)pg; 592 } 593 splx(s); 594 return (kbreak); 595 } 596 597 /* 598 * vm_page_alloc1: 599 * 600 * Allocate and return a memory cell with no associated object. 601 */ 602 struct vm_page * 603 vm_page_alloc1() 604 { 605 struct vm_page *pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE); 606 if (pg) { 607 pg->wire_count = 1; /* no mappings yet */ 608 pg->flags &= ~PG_BUSY; /* never busy */ 609 } 610 return pg; 611 } 612 613 /* 614 * vm_page_free1: 615 * 616 * Returns the given page to the free list, 617 * disassociating it with any VM object. 618 * 619 * Object and page must be locked prior to entry. 620 */ 621 void 622 vm_page_free1(mem) 623 struct vm_page *mem; 624 { 625 if (mem->flags != (PG_CLEAN|PG_FAKE)) { 626 printf("Freeing invalid page %p\n", mem); 627 printf("pa = %llx\n", (unsigned long long)VM_PAGE_TO_PHYS(mem)); 628 Debugger(); 629 return; 630 } 631 mem->flags |= PG_BUSY; 632 mem->wire_count = 0; 633 uvm_pagefree(mem); 634 } 635 #endif 636 637 /* 638 * Create and return a physical map. 639 */ 640 struct pmap * 641 pmap_create(void) 642 { 643 struct pmap *pm; 644 645 pm = (struct pmap *)malloc(sizeof *pm, M_VMPMAP, M_WAITOK); 646 memset((caddr_t)pm, 0, sizeof *pm); 647 pmap_pinit(pm); 648 return pm; 649 } 650 651 /* 652 * Initialize a preallocated and zeroed pmap structure. 653 */ 654 void 655 pmap_pinit(struct pmap *pm) 656 { 657 int i; 658 659 /* 660 * Allocate some segment registers for this pmap. 661 */ 662 pm->pm_refs = 1; 663 for (i = 0; i < STSZ; i++) 664 pm->pm_ptbl[i] = NULL; 665 } 666 667 /* 668 * Add a reference to the given pmap. 669 */ 670 void 671 pmap_reference(struct pmap *pm) 672 { 673 674 pm->pm_refs++; 675 } 676 677 /* 678 * Retire the given pmap from service. 679 * Should only be called if the map contains no valid mappings. 680 */ 681 void 682 pmap_destroy(struct pmap *pm) 683 { 684 685 if (--pm->pm_refs == 0) { 686 pmap_release(pm); 687 free((caddr_t)pm, M_VMPMAP); 688 } 689 } 690 691 /* 692 * Release any resources held by the given physical map. 693 * Called when a pmap initialized by pmap_pinit is being released. 694 */ 695 static void 696 pmap_release(struct pmap *pm) 697 { 698 int i; 699 700 for (i = 0; i < STSZ; i++) 701 if (pm->pm_ptbl[i]) { 702 uvm_km_free(kernel_map, (vaddr_t)pm->pm_ptbl[i], NBPG); 703 pm->pm_ptbl[i] = NULL; 704 } 705 if (pm->pm_ctx) ctx_free(pm); 706 } 707 708 /* 709 * Copy the range specified by src_addr/len 710 * from the source map to the range dst_addr/len 711 * in the destination map. 712 * 713 * This routine is only advisory and need not do anything. 
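 * (This implementation leaves it empty.)
 */
/*
 * Illustrative sketch only (kept under #if 0): the reference-counted
 * life cycle implemented by pmap_create()/pmap_reference()/
 * pmap_destroy() above.  The final pmap_destroy() calls pmap_release(),
 * which frees the page-table pages and gives back the context.
 */
#if 0
	struct pmap *pm = pmap_create();	/* pm_refs == 1 */

	pmap_reference(pm);			/* pm_refs == 2 */
	pmap_destroy(pm);			/* pm_refs == 1, pmap still alive */
	pmap_destroy(pm);			/* pm_refs == 0, resources released */
#endif
/*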
714 */ 715 void 716 pmap_copy(struct pmap *dst_pmap, struct pmap *src_pmap, vaddr_t dst_addr, 717 vsize_t len, vaddr_t src_addr) 718 { 719 } 720 721 /* 722 * Require that all active physical maps contain no 723 * incorrect entries NOW. 724 */ 725 void 726 pmap_update(struct pmap *pmap) 727 { 728 } 729 730 /* 731 * Garbage collects the physical map system for 732 * pages which are no longer used. 733 * Success need not be guaranteed -- that is, there 734 * may well be pages which are not referenced, but 735 * others may be collected. 736 * Called by the pageout daemon when pages are scarce. 737 */ 738 void 739 pmap_collect(struct pmap *pm) 740 { 741 } 742 743 /* 744 * Fill the given physical page with zeroes. 745 */ 746 void 747 pmap_zero_page(paddr_t pa) 748 { 749 750 #ifdef NOCACHE 751 memset((caddr_t)pa, 0, NBPG); 752 #else 753 int i; 754 755 for (i = NBPG/CACHELINESIZE; i > 0; i--) { 756 __asm __volatile ("dcbz 0,%0" :: "r"(pa)); 757 pa += CACHELINESIZE; 758 } 759 #endif 760 } 761 762 /* 763 * Copy the given physical source page to its destination. 764 */ 765 void 766 pmap_copy_page(paddr_t src, paddr_t dst) 767 { 768 769 memcpy((caddr_t)dst, (caddr_t)src, NBPG); 770 dcache_flush_page(dst); 771 } 772 773 /* 774 * This returns whether this is the first mapping of a page. 775 */ 776 static inline int 777 pmap_enter_pv(struct pmap *pm, vaddr_t va, paddr_t pa) 778 { 779 struct pv_entry *pv, *npv = NULL; 780 int s; 781 782 if (!pmap_initialized) 783 return 0; 784 785 s = splvm(); 786 787 pv = pa_to_pv(pa); 788 if (!pv->pv_pm) { 789 /* 790 * No entries yet, use header as the first entry. 791 */ 792 pv->pv_va = va; 793 pv->pv_pm = pm; 794 pv->pv_next = NULL; 795 } else { 796 /* 797 * There is at least one other VA mapping this page. 798 * Place this entry after the header. 799 */ 800 npv = pool_get(&pv_pool, PR_WAITOK); 801 if (!npv) return (0); 802 npv->pv_va = va; 803 npv->pv_pm = pm; 804 npv->pv_next = pv->pv_next; 805 pv->pv_next = npv; 806 } 807 splx(s); 808 return (1); 809 } 810 811 static void 812 pmap_remove_pv(struct pmap *pm, vaddr_t va, paddr_t pa) 813 { 814 struct pv_entry *pv, *npv; 815 816 /* 817 * Remove from the PV table. 818 */ 819 pv = pa_to_pv(pa); 820 if (!pv) return; 821 822 /* 823 * If it is the first entry on the list, it is actually 824 * in the header and we must copy the following entry up 825 * to the header. Otherwise we must search the list for 826 * the entry. In either case we free the now unused entry. 827 */ 828 if (pm == pv->pv_pm && PV_CMPVA(va, pv)) { 829 if ((npv = pv->pv_next)) { 830 *pv = *npv; 831 pool_put(&pv_pool, npv); 832 } else 833 pv->pv_pm = NULL; 834 } else { 835 for (; (npv = pv->pv_next) != NULL; pv = npv) 836 if (pm == npv->pv_pm && PV_CMPVA(va, npv)) 837 break; 838 if (npv) { 839 pv->pv_next = npv->pv_next; 840 pool_put(&pv_pool, npv); 841 } 842 } 843 } 844 845 /* 846 * Insert physical page at pa into the given pmap at virtual address va. 847 */ 848 int 849 pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags) 850 { 851 int s; 852 u_int tte; 853 int managed; 854 855 /* 856 * Have to remove any existing mapping first. 857 */ 858 pmap_remove(pm, va, va + NBPG); 859 860 if (flags & PMAP_WIRED) flags |= prot; 861 862 /* If it has no protections don't bother w/the rest */ 863 if (!(flags & VM_PROT_ALL)) 864 return (0); 865 866 managed = 0; 867 if (vm_physseg_find(atop(pa), NULL) != -1) 868 managed = 1; 869 870 /* 871 * Generate TTE. 
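 * (For a managed, cacheable, writable kernel page the code below
 *  composes tte = TTE_PA(pa) | TTE_EX | TTE_SZ_16K |
 *  TTE_ZONE(ZONE_PRIV) | TTE_WR; PME_NOCACHE mappings get TTE_I |
 *  TTE_G instead, and PME_WRITETHROUG adds TTE_W.)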
872 * 873 * XXXX 874 * 875 * Since the kernel does not handle execution privileges properly, 876 * we will handle read and execute permissions together. 877 */ 878 tte = TTE_PA(pa) | TTE_EX; 879 /* XXXX -- need to support multiple page sizes. */ 880 tte |= TTE_SZ_16K; 881 #ifdef DIAGNOSTIC 882 if ((flags & (PME_NOCACHE | PME_WRITETHROUG)) == 883 (PME_NOCACHE | PME_WRITETHROUG)) 884 panic("pmap_enter: uncached & writethrough\n"); 885 #endif 886 if (flags & PME_NOCACHE) 887 /* Must be I/O mapping */ 888 tte |= TTE_I | TTE_G; 889 #ifdef NOCACHE 890 tte |= TTE_I; 891 #else 892 else if (flags & PME_WRITETHROUG) 893 /* Uncached and writethrough are not compatible */ 894 tte |= TTE_W; 895 #endif 896 if (pm == pmap_kernel()) 897 tte |= TTE_ZONE(ZONE_PRIV); 898 else 899 tte |= TTE_ZONE(ZONE_USER); 900 901 if (flags & VM_PROT_WRITE) 902 tte |= TTE_WR; 903 904 /* 905 * Now record mapping for later back-translation. 906 */ 907 if (pmap_initialized && managed) { 908 char *attr; 909 910 if (!pmap_enter_pv(pm, va, pa)) { 911 /* Could not enter pv on a managed page */ 912 return 1; 913 } 914 915 /* Now set attributes. */ 916 attr = pa_to_attr(pa); 917 #ifdef DIAGNOSTIC 918 if (!attr) 919 panic("managed but no attr\n"); 920 #endif 921 if (flags & VM_PROT_ALL) 922 *attr |= PTE_HI_REF; 923 if (flags & VM_PROT_WRITE) 924 *attr |= PTE_HI_CHG; 925 } 926 927 s = splvm(); 928 pm->pm_stats.resident_count++; 929 930 /* Insert page into page table. */ 931 pte_enter(pm, va, tte); 932 933 /* If this is a real fault, enter it in the tlb */ 934 if (tte && ((flags & PMAP_WIRED) == 0)) { 935 ppc4xx_tlb_enter(pm->pm_ctx, va, tte); 936 } 937 splx(s); 938 return 0; 939 } 940 941 void 942 pmap_unwire(struct pmap *pm, vaddr_t va) 943 { 944 struct pv_entry *pv, *npv; 945 paddr_t pa; 946 int s = splvm(); 947 948 if (pm == NULL) { 949 return; 950 } 951 952 if (!pmap_extract(pm, va, &pa)) { 953 return; 954 } 955 956 va |= PV_WIRED; 957 958 pv = pa_to_pv(pa); 959 if (!pv) return; 960 961 /* 962 * If it is the first entry on the list, it is actually 963 * in the header and we must copy the following entry up 964 * to the header. Otherwise we must search the list for 965 * the entry. In either case we free the now unused entry. 966 */ 967 for (npv = pv; (npv = pv->pv_next) != NULL; pv = npv) { 968 if (pm == npv->pv_pm && PV_CMPVA(va, npv)) { 969 npv->pv_va &= ~PV_WIRED; 970 break; 971 } 972 } 973 splx(s); 974 } 975 976 void 977 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot) 978 { 979 int s; 980 u_int tte; 981 struct pmap *pm = pmap_kernel(); 982 983 /* 984 * Have to remove any existing mapping first. 985 */ 986 987 /* 988 * Generate TTE. 989 * 990 * XXXX 991 * 992 * Since the kernel does not handle execution privileges properly, 993 * we will handle read and execute permissions together. 994 */ 995 tte = 0; 996 if (prot & VM_PROT_ALL) { 997 998 tte = TTE_PA(pa) | TTE_EX | TTE_ZONE(ZONE_PRIV); 999 /* XXXX -- need to support multiple page sizes. 
*/ 1000 tte |= TTE_SZ_16K; 1001 #ifdef DIAGNOSTIC 1002 if ((prot & (PME_NOCACHE | PME_WRITETHROUG)) == 1003 (PME_NOCACHE | PME_WRITETHROUG)) 1004 panic("pmap_kenter_pa: uncached & writethrough\n"); 1005 #endif 1006 if (prot & PME_NOCACHE) 1007 /* Must be I/O mapping */ 1008 tte |= TTE_I | TTE_G; 1009 #ifdef NOCACHE 1010 tte |= TTE_I; 1011 #else 1012 else if (prot & PME_WRITETHROUG) 1013 /* Uncached and writethrough are not compatible */ 1014 tte |= TTE_W; 1015 #endif 1016 if (prot & VM_PROT_WRITE) 1017 tte |= TTE_WR; 1018 } 1019 1020 s = splvm(); 1021 pm->pm_stats.resident_count++; 1022 1023 /* Insert page into page table. */ 1024 pte_enter(pm, va, tte); 1025 splx(s); 1026 } 1027 1028 void 1029 pmap_kremove(vaddr_t va, vsize_t len) 1030 { 1031 1032 while (len > 0) { 1033 pte_enter(pmap_kernel(), va, 0); 1034 va += PAGE_SIZE; 1035 len -= PAGE_SIZE; 1036 } 1037 } 1038 1039 /* 1040 * Remove the given range of mapping entries. 1041 */ 1042 void 1043 pmap_remove(struct pmap *pm, vaddr_t va, vaddr_t endva) 1044 { 1045 int s; 1046 paddr_t pa; 1047 volatile u_int *ptp; 1048 1049 s = splvm(); 1050 while (va < endva) { 1051 1052 if ((ptp = pte_find(pm, va)) && (pa = *ptp)) { 1053 pa = TTE_PA(pa); 1054 pmap_remove_pv(pm, va, pa); 1055 *ptp = 0; 1056 ppc4xx_tlb_flush(va, pm->pm_ctx); 1057 pm->pm_stats.resident_count--; 1058 } 1059 va += NBPG; 1060 } 1061 1062 splx(s); 1063 } 1064 1065 /* 1066 * Get the physical page address for the given pmap/virtual address. 1067 */ 1068 boolean_t 1069 pmap_extract(struct pmap *pm, vaddr_t va, paddr_t *pap) 1070 { 1071 int seg = STIDX(va); 1072 int ptn = PTIDX(va); 1073 u_int pa = 0; 1074 int s = splvm(); 1075 1076 if (pm->pm_ptbl[seg] && (pa = pm->pm_ptbl[seg][ptn])) { 1077 *pap = TTE_PA(pa) | (va & PGOFSET); 1078 } 1079 splx(s); 1080 return (pa != 0); 1081 } 1082 1083 /* 1084 * Lower the protection on the specified range of this pmap. 1085 * 1086 * There are only two cases: either the protection is going to 0, 1087 * or it is going to read-only. 1088 */ 1089 void 1090 pmap_protect(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot) 1091 { 1092 volatile u_int *ptp; 1093 int s; 1094 1095 if (prot & VM_PROT_READ) { 1096 s = splvm(); 1097 while (sva < eva) { 1098 if ((ptp = pte_find(pm, sva)) != NULL) { 1099 *ptp &= ~TTE_WR; 1100 ppc4xx_tlb_flush(sva, pm->pm_ctx); 1101 } 1102 sva += NBPG; 1103 } 1104 splx(s); 1105 return; 1106 } 1107 pmap_remove(pm, sva, eva); 1108 } 1109 1110 boolean_t 1111 check_attr(struct vm_page *pg, u_int mask, int clear) 1112 { 1113 paddr_t pa = VM_PAGE_TO_PHYS(pg); 1114 int s; 1115 char *attr; 1116 int rv; 1117 1118 /* 1119 * First modify bits in cache. 1120 */ 1121 s = splvm(); 1122 attr = pa_to_attr(pa); 1123 if (attr == NULL) 1124 return FALSE; 1125 1126 rv = ((*attr & mask) != 0); 1127 if (clear) 1128 *attr &= ~mask; 1129 1130 splx(s); 1131 return rv; 1132 } 1133 1134 1135 /* 1136 * Lower the protection on the specified physical page. 1137 * 1138 * There are only two cases: either the protection is going to 0, 1139 * or it is going to read-only. 
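 * (VM_PROT_READ write-protects every existing mapping of the page;
 *  anything without VM_PROT_READ removes the mappings outright via
 *  pmap_remove().)
 */
/*
 * Hypothetical usage sketch (kept under #if 0): revoking write access
 * to one managed page before cleaning it; "pa" stands for some managed
 * physical address.
 */
#if 0
	struct vm_page *pg = PHYS_TO_VM_PAGE(pa);

	pmap_page_protect(pg, VM_PROT_READ);
#endif
/*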
1140 */ 1141 void 1142 pmap_page_protect(struct vm_page *pg, vm_prot_t prot) 1143 { 1144 paddr_t pa = VM_PAGE_TO_PHYS(pg); 1145 vaddr_t va; 1146 struct pv_entry *pvh, *pv, *npv; 1147 struct pmap *pm; 1148 1149 pvh = pa_to_pv(pa); 1150 if (pvh == NULL) 1151 return; 1152 1153 /* Handle extra pvs which may be deleted in the operation */ 1154 for (pv = pvh->pv_next; pv; pv = npv) { 1155 npv = pv->pv_next; 1156 1157 pm = pv->pv_pm; 1158 va = pv->pv_va; 1159 pmap_protect(pm, va, va+NBPG, prot); 1160 } 1161 /* Now check the head pv */ 1162 if (pvh->pv_pm) { 1163 pv = pvh; 1164 pm = pv->pv_pm; 1165 va = pv->pv_va; 1166 pmap_protect(pm, va, va+NBPG, prot); 1167 } 1168 } 1169 1170 /* 1171 * Activate the address space for the specified process. If the process 1172 * is the current process, load the new MMU context. 1173 */ 1174 void 1175 pmap_activate(struct proc *p) 1176 { 1177 #if 0 1178 struct pcb *pcb = &p->p_addr->u_pcb; 1179 pmap_t pmap = p->p_vmspace->vm_map.pmap; 1180 1181 /* 1182 * XXX Normally performed in cpu_fork(). 1183 */ 1184 printf("pmap_activate(%p), pmap=%p\n",p,pmap); 1185 if (pcb->pcb_pm != pmap) { 1186 pcb->pcb_pm = pmap; 1187 (void) pmap_extract(pmap_kernel(), (vaddr_t)pcb->pcb_pm, 1188 (paddr_t *)&pcb->pcb_pmreal); 1189 } 1190 1191 if (p == curproc) { 1192 /* Store pointer to new current pmap. */ 1193 curpm = pcb->pcb_pmreal; 1194 } 1195 #endif 1196 } 1197 1198 /* 1199 * Deactivate the specified process's address space. 1200 */ 1201 void 1202 pmap_deactivate(struct proc *p) 1203 { 1204 } 1205 1206 /* 1207 * Synchronize caches corresponding to [addr, addr+len) in p. 1208 */ 1209 void 1210 pmap_procwr(struct proc *p, vaddr_t va, size_t len) 1211 { 1212 struct pmap *pm = p->p_vmspace->vm_map.pmap; 1213 int msr, ctx, opid; 1214 1215 1216 /* 1217 * Need to turn off IMMU and switch to user context. 1218 * (icbi uses DMMU). 1219 */ 1220 if (!(ctx = pm->pm_ctx)) { 1221 /* No context -- assign it one */ 1222 ctx_alloc(pm); 1223 ctx = pm->pm_ctx; 1224 } 1225 __asm __volatile("mfmsr %0;" 1226 "li %1, 0x20;" 1227 "andc %1,%0,%1;" 1228 "mtmsr %1;" 1229 "sync;isync;" 1230 "mfpid %1;" 1231 "mtpid %2;" 1232 "sync; isync;" 1233 "1:" 1234 "dcbf 0,%3;" 1235 "icbi 0,%3;" 1236 "addi %3,%3,32;" 1237 "addic. %4,%4,-32;" 1238 "bge 1b;" 1239 "mtpid %1;" 1240 "mtmsr %0;" 1241 "sync; isync" 1242 : "=&r" (msr), "=&r" (opid) 1243 : "r" (ctx), "r" (va), "r" (len)); 1244 } 1245 1246 1247 /* This has to be done in real mode !!! */ 1248 void 1249 ppc4xx_tlb_flush(vaddr_t va, int pid) 1250 { 1251 u_long i, found; 1252 u_long msr; 1253 1254 /* If there's no context then it can't be mapped. */ 1255 if (!pid) return; 1256 1257 asm("mfpid %1;" /* Save PID */ 1258 "mfmsr %2;" /* Save MSR */ 1259 "li %0,0;" /* Now clear MSR */ 1260 "mtmsr %0;" 1261 "mtpid %4;" /* Set PID */ 1262 "sync;" 1263 "tlbsx. 
%0,0,%3;" /* Search TLB */ 1264 "sync;" 1265 "mtpid %1;" /* Restore PID */ 1266 "mtmsr %2;" /* Restore MSR */ 1267 "sync;isync;" 1268 "li %1,1;" 1269 "beq 1f;" 1270 "li %1,0;" 1271 "1:" 1272 : "=&r" (i), "=&r" (found), "=&r" (msr) 1273 : "r" (va), "r" (pid)); 1274 if (found && !TLB_LOCKED(i)) { 1275 1276 /* Now flush translation */ 1277 asm volatile( 1278 "tlbwe %0,%1,0;" 1279 "sync;isync;" 1280 : : "r" (0), "r" (i)); 1281 1282 tlb_info[i].ti_ctx = 0; 1283 tlb_info[i].ti_flags = 0; 1284 tlbnext = i; 1285 /* Successful flushes */ 1286 tlbflush_ev.ev_count++; 1287 } 1288 } 1289 1290 void 1291 ppc4xx_tlb_flush_all(void) 1292 { 1293 u_long i; 1294 1295 for (i = 0; i < NTLB; i++) 1296 if (!TLB_LOCKED(i)) { 1297 asm volatile( 1298 "tlbwe %0,%1,0;" 1299 "sync;isync;" 1300 : : "r" (0), "r" (i)); 1301 tlb_info[i].ti_ctx = 0; 1302 tlb_info[i].ti_flags = 0; 1303 } 1304 1305 asm volatile("sync;isync"); 1306 } 1307 1308 /* Find a TLB entry to evict. */ 1309 static int 1310 ppc4xx_tlb_find_victim(void) 1311 { 1312 int flags; 1313 1314 for (;;) { 1315 if (++tlbnext >= NTLB) 1316 tlbnext = TLB_NRESERVED; 1317 flags = tlb_info[tlbnext].ti_flags; 1318 if (!(flags & TLBF_USED) || 1319 (flags & (TLBF_LOCKED | TLBF_REF)) == 0) { 1320 u_long va, stack = (u_long)&va; 1321 1322 if (!((tlb_info[tlbnext].ti_va ^ stack) & (~PGOFSET)) && 1323 (tlb_info[tlbnext].ti_ctx == KERNEL_PID) && 1324 (flags & TLBF_USED)) { 1325 /* Kernel stack page */ 1326 flags |= TLBF_USED; 1327 tlb_info[tlbnext].ti_flags = flags; 1328 } else { 1329 /* Found it! */ 1330 return (tlbnext); 1331 } 1332 } else { 1333 tlb_info[tlbnext].ti_flags = (flags & ~TLBF_REF); 1334 } 1335 } 1336 } 1337 1338 void 1339 ppc4xx_tlb_enter(int ctx, vaddr_t va, u_int pte) 1340 { 1341 u_long th, tl, idx; 1342 tlbpid_t pid; 1343 u_short msr; 1344 int s; 1345 1346 tlbenter_ev.ev_count++; 1347 1348 th = (va & TLB_EPN_MASK) | 1349 (((pte & TTE_SZ_MASK) >> TTE_SZ_SHIFT) << TLB_SIZE_SHFT) | 1350 TLB_VALID; 1351 tl = pte; 1352 1353 s = splhigh(); 1354 idx = ppc4xx_tlb_find_victim(); 1355 1356 #ifdef DIAGNOSTIC 1357 if ((idx < TLB_NRESERVED) || (idx >= NTLB)) { 1358 panic("ppc4xx_tlb_enter: repacing entry %ld\n", idx); 1359 } 1360 #endif 1361 1362 tlb_info[idx].ti_va = (va & TLB_EPN_MASK); 1363 tlb_info[idx].ti_ctx = ctx; 1364 tlb_info[idx].ti_flags = TLBF_USED | TLBF_REF; 1365 1366 asm volatile( 1367 "mfmsr %0;" /* Save MSR */ 1368 "li %1,0;" 1369 "tlbwe %1,%3,0;" /* Invalidate old entry. */ 1370 "mtmsr %1;" /* Clear MSR */ 1371 "mfpid %1;" /* Save old PID */ 1372 "mtpid %2;" /* Load translation ctx */ 1373 "sync; isync;" 1374 #ifdef DEBUG 1375 "andi. 
%3,%3,63;" 1376 "tweqi %3,0;" /* XXXXX DEBUG trap on index 0 */ 1377 #endif 1378 "tlbwe %4,%3,1; tlbwe %5,%3,0;" /* Set TLB */ 1379 "sync; isync;" 1380 "mtpid %1; mtmsr %0;" /* Restore PID and MSR */ 1381 "sync; isync;" 1382 : "=&r" (msr), "=&r" (pid) 1383 : "r" (ctx), "r" (idx), "r" (tl), "r" (th)); 1384 splx(s); 1385 } 1386 1387 void 1388 ppc4xx_tlb_unpin(int i) 1389 { 1390 1391 if (i == -1) 1392 for (i = 0; i < TLB_NRESERVED; i++) 1393 tlb_info[i].ti_flags &= ~TLBF_LOCKED; 1394 else 1395 tlb_info[i].ti_flags &= ~TLBF_LOCKED; 1396 } 1397 1398 void 1399 ppc4xx_tlb_init(void) 1400 { 1401 int i; 1402 1403 /* Mark reserved TLB entries */ 1404 for (i = 0; i < TLB_NRESERVED; i++) { 1405 tlb_info[i].ti_flags = TLBF_LOCKED | TLBF_USED; 1406 tlb_info[i].ti_ctx = KERNEL_PID; 1407 } 1408 1409 /* Setup security zones */ 1410 /* Z0 - accessible by kernel only if TLB entry permissions allow 1411 * Z1,Z2 - access is controlled by TLB entry permissions 1412 * Z3 - full access regardless of TLB entry permissions 1413 */ 1414 1415 asm volatile( 1416 "mtspr %0,%1;" 1417 "sync;" 1418 :: "K"(SPR_ZPR), "r" (0x1b000000)); 1419 } 1420 1421 1422 /* 1423 * We should pass the ctx in from trap code. 1424 */ 1425 int 1426 pmap_tlbmiss(vaddr_t va, int ctx) 1427 { 1428 volatile u_int *pte; 1429 u_long tte; 1430 1431 tlbmiss_ev.ev_count++; 1432 1433 /* 1434 * XXXX We will reserve 0-0x80000000 for va==pa mappings. 1435 */ 1436 if (ctx != KERNEL_PID || (va & 0x80000000)) { 1437 pte = pte_find((struct pmap *)ctxbusy[ctx], va); 1438 if (pte == NULL) { 1439 /* Map unmanaged addresses directly for kernel access */ 1440 return 1; 1441 } 1442 tte = *pte; 1443 if (tte == 0) { 1444 return 1; 1445 } 1446 } else { 1447 /* Create a 16MB writeable mapping. */ 1448 #ifdef NOCACHE 1449 tte = TTE_PA(va) | TTE_ZONE(ZONE_PRIV) | TTE_SZ_16M | TTE_I | TTE_WR; 1450 #else 1451 tte = TTE_PA(va) | TTE_ZONE(ZONE_PRIV) | TTE_SZ_16M | TTE_WR; 1452 #endif 1453 } 1454 tlbhit_ev.ev_count++; 1455 ppc4xx_tlb_enter(ctx, va, tte); 1456 1457 return 0; 1458 } 1459 1460 /* 1461 * Flush all the entries matching a context from the TLB. 1462 */ 1463 static int 1464 ctx_flush(int cnum) 1465 { 1466 int i; 1467 1468 /* We gotta steal this context */ 1469 for (i = TLB_NRESERVED; i < NTLB; i++) { 1470 if (tlb_info[i].ti_ctx == cnum) { 1471 /* Can't steal ctx if it has a locked entry. */ 1472 if (TLB_LOCKED(i)) { 1473 #ifdef DIAGNOSTIC 1474 printf("ctx_flush: can't invalidate " 1475 "locked mapping %d " 1476 "for context %d\n", i, cnum); 1477 Debugger(); 1478 #endif 1479 return (1); 1480 } 1481 #ifdef DIAGNOSTIC 1482 if (i < TLB_NRESERVED) 1483 panic("TLB entry %d not locked\n", i); 1484 #endif 1485 /* Invalidate particular TLB entry regardless of locked status */ 1486 asm volatile("tlbwe %0,%1,0" : :"r"(0),"r"(i)); 1487 tlb_info[i].ti_flags = 0; 1488 } 1489 } 1490 return (0); 1491 } 1492 1493 /* 1494 * Allocate a context. If necessary, steal one from someone else. 1495 * 1496 * The new context is flushed from the TLB before returning. 1497 */ 1498 int 1499 ctx_alloc(struct pmap *pm) 1500 { 1501 int s, cnum; 1502 static int next = MINCTX; 1503 1504 if (pm == pmap_kernel()) { 1505 #ifdef DIAGNOSTIC 1506 printf("ctx_alloc: kernel pmap!\n"); 1507 #endif 1508 return (0); 1509 } 1510 s = splvm(); 1511 1512 /* Find a likely context. 
 */
	cnum = next;
	do {
		if ((++cnum) >= NUMCTX)		/* wrap within ctxbusy[] */
			cnum = MINCTX;
	} while (ctxbusy[cnum] != NULL && cnum != next);

	/* Now clean it out */
oops:
	if (cnum < MINCTX)
		cnum = MINCTX;	/* Never steal ctx 0 or 1 */
	if (ctx_flush(cnum)) {
		/* oops -- something's wired. */
		if ((++cnum) >= NUMCTX)
			cnum = MINCTX;
		goto oops;
	}

	if (ctxbusy[cnum]) {
#ifdef DEBUG
		/* We should identify this pmap and clear it */
		printf("Warning: stealing context %d\n", cnum);
#endif
		ctxbusy[cnum]->pm_ctx = 0;
	}
	ctxbusy[cnum] = pm;
	next = cnum;
	splx(s);
	pm->pm_ctx = cnum;

	return cnum;
}

/*
 * Give away a context.
 */
void
ctx_free(struct pmap *pm)
{
	int oldctx;

	oldctx = pm->pm_ctx;

	if (oldctx == 0)
		panic("ctx_free: freeing kernel context");
#ifdef DIAGNOSTIC
	if (ctxbusy[oldctx] == 0)
		printf("ctx_free: freeing free context %d\n", oldctx);
	if (ctxbusy[oldctx] != pm) {
		printf("ctx_free: freeing someone else's context\n "
		    "ctxbusy[%d] = %p, pm->pm_ctx = %p\n",
		    oldctx, (void *)(u_long)ctxbusy[oldctx], pm);
		Debugger();
	}
#endif
	/* We should verify it has not been stolen and reallocated... */
	ctxbusy[oldctx] = NULL;
	ctx_flush(oldctx);
}


#ifdef DEBUG
/*
 * Test ref/modify handling.
 */
void pmap_testout __P((void));
void
pmap_testout()
{
	vaddr_t va;
	volatile int *loc;
	int val = 0;
	paddr_t pa;
	struct vm_page *pg;
	int ref, mod;

	/* Allocate a page */
	va = (vaddr_t)uvm_km_alloc1(kernel_map, NBPG, 1);
	loc = (int*)va;

	pmap_extract(pmap_kernel(), va, &pa);
	pg = PHYS_TO_VM_PAGE(pa);
	pmap_unwire(pmap_kernel(), va);

	pmap_remove(pmap_kernel(), va, va+1);
	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
	pmap_update(pmap_kernel());

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	    (void *)(u_long)va, (long)pa,
	    ref, mod);

	/* Check it's properly cleared */
	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Checking cleared page: ref %d, mod %d\n",
	    ref, mod);

	/* Reference page */
	val = *loc;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Referenced page: ref %d, mod %d val %x\n",
	    ref, mod, val);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	    (void *)(u_long)va, (long)pa,
	    ref, mod);

	/* Modify page */
	*loc = 1;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Modified page: ref %d, mod %d\n",
	    ref, mod);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	    (void *)(u_long)va, (long)pa,
	    ref, mod);

	/* Check it's properly cleared */
	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Checking cleared page: ref %d, mod %d\n",
	    ref, mod);

	/* Modify page */
	*loc = 1;

	ref = pmap_is_referenced(pg);
	mod =
pmap_is_modified(pg); 1654 printf("Modified page: ref %d, mod %d\n", 1655 ref, mod); 1656 1657 /* Check pmap_protect() */ 1658 pmap_protect(pmap_kernel(), va, va+1, VM_PROT_READ); 1659 pmap_update(pmap_kernel()); 1660 ref = pmap_is_referenced(pg); 1661 mod = pmap_is_modified(pg); 1662 printf("pmap_protect(VM_PROT_READ): ref %d, mod %d\n", 1663 ref, mod); 1664 1665 /* Now clear reference and modify */ 1666 ref = pmap_clear_reference(pg); 1667 mod = pmap_clear_modify(pg); 1668 printf("Clearing page va %p pa %lx: ref %d, mod %d\n", 1669 (void *)(u_long)va, (long)pa, 1670 ref, mod); 1671 1672 /* Reference page */ 1673 val = *loc; 1674 1675 ref = pmap_is_referenced(pg); 1676 mod = pmap_is_modified(pg); 1677 printf("Referenced page: ref %d, mod %d val %x\n", 1678 ref, mod, val); 1679 1680 /* Now clear reference and modify */ 1681 ref = pmap_clear_reference(pg); 1682 mod = pmap_clear_modify(pg); 1683 printf("Clearing page va %p pa %lx: ref %d, mod %d\n", 1684 (void *)(u_long)va, (long)pa, 1685 ref, mod); 1686 1687 /* Modify page */ 1688 #if 0 1689 pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0); 1690 pmap_update(pmap_kernel()); 1691 #endif 1692 *loc = 1; 1693 1694 ref = pmap_is_referenced(pg); 1695 mod = pmap_is_modified(pg); 1696 printf("Modified page: ref %d, mod %d\n", 1697 ref, mod); 1698 1699 /* Check pmap_protect() */ 1700 pmap_protect(pmap_kernel(), va, va+1, VM_PROT_NONE); 1701 pmap_update(pmap_kernel()); 1702 ref = pmap_is_referenced(pg); 1703 mod = pmap_is_modified(pg); 1704 printf("pmap_protect(): ref %d, mod %d\n", 1705 ref, mod); 1706 1707 /* Now clear reference and modify */ 1708 ref = pmap_clear_reference(pg); 1709 mod = pmap_clear_modify(pg); 1710 printf("Clearing page va %p pa %lx: ref %d, mod %d\n", 1711 (void *)(u_long)va, (long)pa, 1712 ref, mod); 1713 1714 /* Reference page */ 1715 val = *loc; 1716 1717 ref = pmap_is_referenced(pg); 1718 mod = pmap_is_modified(pg); 1719 printf("Referenced page: ref %d, mod %d val %x\n", 1720 ref, mod, val); 1721 1722 /* Now clear reference and modify */ 1723 ref = pmap_clear_reference(pg); 1724 mod = pmap_clear_modify(pg); 1725 printf("Clearing page va %p pa %lx: ref %d, mod %d\n", 1726 (void *)(u_long)va, (long)pa, 1727 ref, mod); 1728 1729 /* Modify page */ 1730 #if 0 1731 pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0); 1732 pmap_update(pmap_kernel()); 1733 #endif 1734 *loc = 1; 1735 1736 ref = pmap_is_referenced(pg); 1737 mod = pmap_is_modified(pg); 1738 printf("Modified page: ref %d, mod %d\n", 1739 ref, mod); 1740 1741 /* Check pmap_pag_protect() */ 1742 pmap_page_protect(pg, VM_PROT_READ); 1743 ref = pmap_is_referenced(pg); 1744 mod = pmap_is_modified(pg); 1745 printf("pmap_page_protect(VM_PROT_READ): ref %d, mod %d\n", 1746 ref, mod); 1747 1748 /* Now clear reference and modify */ 1749 ref = pmap_clear_reference(pg); 1750 mod = pmap_clear_modify(pg); 1751 printf("Clearing page va %p pa %lx: ref %d, mod %d\n", 1752 (void *)(u_long)va, (long)pa, 1753 ref, mod); 1754 1755 /* Reference page */ 1756 val = *loc; 1757 1758 ref = pmap_is_referenced(pg); 1759 mod = pmap_is_modified(pg); 1760 printf("Referenced page: ref %d, mod %d val %x\n", 1761 ref, mod, val); 1762 1763 /* Now clear reference and modify */ 1764 ref = pmap_clear_reference(pg); 1765 mod = pmap_clear_modify(pg); 1766 printf("Clearing page va %p pa %lx: ref %d, mod %d\n", 1767 (void *)(u_long)va, (long)pa, 1768 ref, mod); 1769 1770 /* Modify page */ 1771 #if 0 1772 pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0); 1773 pmap_update(pmap_kernel()); 1774 #endif 1775 *loc = 1; 
	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Modified page: ref %d, mod %d\n",
	    ref, mod);

	/* Check pmap_page_protect() */
	pmap_page_protect(pg, VM_PROT_NONE);
	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("pmap_page_protect(): ref %d, mod %d\n",
	    ref, mod);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	    (void *)(u_long)va, (long)pa,
	    ref, mod);

	/* Reference page */
	val = *loc;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Referenced page: ref %d, mod %d val %x\n",
	    ref, mod, val);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	    (void *)(u_long)va, (long)pa,
	    ref, mod);

	/* Modify page */
#if 0
	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
	pmap_update(pmap_kernel());
#endif
	*loc = 1;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Modified page: ref %d, mod %d\n",
	    ref, mod);

	/* Unmap page */
	pmap_remove(pmap_kernel(), va, va+1);
	pmap_update(pmap_kernel());
	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Unmapped page: ref %d, mod %d\n", ref, mod);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	    (void *)(u_long)va, (long)pa, ref, mod);

	/* Check it's properly cleared */
	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Checking cleared page: ref %d, mod %d\n",
	    ref, mod);

	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL,
	    VM_PROT_ALL|PMAP_WIRED);
	uvm_km_free(kernel_map, (vaddr_t)va, NBPG);
}
#endif
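
/*
 * Hypothetical usage sketch (kept under #if 0, not part of this pmap):
 * the basic call sequence the interfaces above provide, with a made-up,
 * page-aligned kernel VA/PA pair.  It mirrors what pmap_testout()
 * exercises.
 */
#if 0
static void
example_map_one_page(vaddr_t va, paddr_t pa)
{
	paddr_t check;

	pmap_enter(pmap_kernel(), va, pa, VM_PROT_READ | VM_PROT_WRITE,
	    PMAP_WIRED);
	pmap_update(pmap_kernel());

	if (pmap_extract(pmap_kernel(), va, &check) && check == pa)
		printf("mapped %#lx -> %#lx\n", (u_long)va, (u_long)check);

	pmap_protect(pmap_kernel(), va, va + NBPG, VM_PROT_READ);
	pmap_remove(pmap_kernel(), va, va + NBPG);
	pmap_update(pmap_kernel());
}
#endif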