/*	$NetBSD: pmap.c,v 1.3 2001/07/22 13:08:09 wiz Exp $	*/

/*
 * Copyright 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Eduardo Horvath and Simon Burge for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#undef NOCACHE

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/device.h>

#include <uvm/uvm.h>

#include <machine/pcb.h>
#include <machine/powerpc.h>

#include <powerpc/spr.h>
#include <powerpc/ibm4xx/tlb.h>


#define CACHE_LINE	32

/*
 * kernmap is an array of PTEs large enough to map in
 * 4GB.  At 16KB/page it is 256K entries or 2MB.
 */
#define KERNMAP_SIZE	((0xffffffffU/NBPG)+1)
caddr_t kernmap;

#define MINCTX		2
#define NUMCTX		256
volatile struct pmap *ctxbusy[NUMCTX];

#define TLBF_USED	0x1
#define TLBF_REF	0x2
#define TLBF_LOCKED	0x4
#define TLB_LOCKED(i)	(tlb_info[(i)].ti_flags & TLBF_LOCKED)
typedef struct tlb_info_s {
	char	ti_flags;
	char	ti_ctx;		/* TLB_PID associated with the entry */
	u_int	ti_va;
} tlb_info_t;

volatile tlb_info_t tlb_info[NTLB];
/* We'll use a modified FIFO replacement policy because it's cheap */
volatile int tlbnext = TLB_NRESERVED;

u_long dtlb_miss_count = 0;
u_long itlb_miss_count = 0;
u_long ktlb_miss_count = 0;
u_long utlb_miss_count = 0;

/* Event counters -- XXX type `INTR' so we can see them with vmstat -i */
struct evcnt tlbmiss_ev = EVCNT_INITIALIZER(EVCNT_TYPE_INTR,
	NULL, "cpu", "tlbmiss");
struct evcnt tlbhit_ev = EVCNT_INITIALIZER(EVCNT_TYPE_INTR,
	NULL, "cpu", "tlbhit");
struct evcnt tlbflush_ev = EVCNT_INITIALIZER(EVCNT_TYPE_INTR,
	NULL, "cpu", "tlbflush");
struct evcnt tlbenter_ev = EVCNT_INITIALIZER(EVCNT_TYPE_INTR,
	NULL, "cpu", "tlbenter");

struct pmap kernel_pmap_;

int physmem;
static int npgs;
static u_int nextavail;
#ifndef MSGBUFADDR
extern paddr_t msgbuf_paddr;
#endif

static struct mem_region *mem, *avail;

/*
 * This is a cache of referenced/modified bits.
 * Bits herein are shifted by ATTRSHFT.
 */
static char *pmap_attrib;

#define PV_WIRED	0x1
#define PV_WIRE(pv)	((pv)->pv_va |= PV_WIRED)
#define PV_CMPVA(va,pv)	(!(((pv)->pv_va^(va))&(~PV_WIRED)))

struct pv_entry {
	struct pv_entry *pv_next;	/* Linked list of mappings */
	vaddr_t pv_va;			/* virtual address of mapping */
	struct pmap *pv_pm;
};

struct pv_entry *pv_table;
static struct pool pv_pool;

static int pmap_initialized;

static int ctx_flush(int);

static inline void dcache_flush_page(vaddr_t);
static inline void icache_flush_page(vaddr_t);
static inline void dcache_flush(vaddr_t, vsize_t);
static inline void icache_flush(vaddr_t, vsize_t);

inline struct pv_entry *pa_to_pv(paddr_t);
static inline char *pa_to_attr(paddr_t);

static inline volatile u_int *pte_find(struct pmap *, vaddr_t);
static inline int pte_enter(struct pmap *, vaddr_t, u_int);

static void pmap_pinit(pmap_t);
static void pmap_release(pmap_t);
static inline int pmap_enter_pv(struct pmap *, vaddr_t, paddr_t);
static void pmap_remove_pv(struct pmap *, vaddr_t, paddr_t);

/*
 * These small routines may have to be replaced,
 * if/when we support processors other than the 604.
 */

static inline void
dcache_flush_page(vaddr_t va)
{
	int i;

	for (i = 0; i < NBPG; i += CACHE_LINE)
		asm volatile("dcbf %0,%1" : : "r" (va), "r" (i));
	asm volatile("sync;isync" : : );
}

static inline void
icache_flush_page(vaddr_t va)
{
	int i;

	for (i = 0; i < NBPG; i += CACHE_LINE)
		asm volatile("icbi %0,%1" : : "r" (va), "r" (i));
	asm volatile("sync;isync" : : );
}

static inline void
dcache_flush(vaddr_t va, vsize_t len)
{
	int i;

	if (len == 0)
		return;

	/* Make sure we flush all cache lines */
	len += va & (CACHE_LINE-1);
	for (i = 0; i < len; i += CACHE_LINE)
		asm volatile("dcbf %0,%1" : : "r" (va), "r" (i));
	asm volatile("sync;isync" : : );
}

static inline void
icache_flush(vaddr_t va, vsize_t len)
{
	int i;

	if (len == 0)
		return;

	/* Make sure we flush all cache lines */
	len += va & (CACHE_LINE-1);
	for (i = 0; i < len; i += CACHE_LINE)
		asm volatile("icbi %0,%1" : : "r" (va), "r" (i));
	asm volatile("sync;isync" : : );
}

inline struct pv_entry *
pa_to_pv(paddr_t pa)
{
	int bank, pg;

	bank = vm_physseg_find(atop(pa), &pg);
	if (bank == -1)
		return NULL;
	return &vm_physmem[bank].pmseg.pvent[pg];
}

static inline char *
pa_to_attr(paddr_t pa)
{
	int bank, pg;

	bank = vm_physseg_find(atop(pa), &pg);
	if (bank == -1)
		return NULL;
	return &vm_physmem[bank].pmseg.attrs[pg];
}
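
/*
 * Page tables are a simple two-level affair: pm_ptbl[] is a directory
 * of STSZ page-table pages, indexed by STIDX(va), and each page-table
 * page is an array of 32-bit TTEs indexed by PTIDX(va).  Directory
 * slots are filled lazily by pte_enter(); the kernel's page-table
 * pages live in the statically allocated kernmap set up in
 * pmap_bootstrap().
 */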

/*
 * Insert PTE into page table.
 */
int
pte_enter(struct pmap *pm, vaddr_t va, u_int pte)
{
	int seg = STIDX(va);
	int ptn = PTIDX(va);
	paddr_t pa;

	if (!pm->pm_ptbl[seg]) {
		/* Don't allocate a page to clear a non-existent mapping. */
		if (!pte) return (1);
		/* Allocate a page XXXX this will sleep! */
		pa = 0;
		pm->pm_ptbl[seg] = (uint *)uvm_km_alloc1(kernel_map, NBPG, 1);
	}
	pm->pm_ptbl[seg][ptn] = pte;

	/* Flush entry. */
	ppc4xx_tlb_flush(va, pm->pm_ctx);
	return (1);
}

/*
 * Get a pointer to a PTE in a page table.
 */
volatile u_int *
pte_find(struct pmap *pm, vaddr_t va)
{
	int seg = STIDX(va);
	int ptn = PTIDX(va);

	if (pm->pm_ptbl[seg])
		return (&pm->pm_ptbl[seg][ptn]);

	return (NULL);
}

/*
 * This is called during initppc, before the system is really initialized.
 */
void
pmap_bootstrap(u_int kernelstart, u_int kernelend)
{
	struct mem_region *mp, *mp1;
	int cnt, i;
	u_int s, e, sz;

	/*
	 * Allocate the kernel page table at the end of
	 * kernel space so it's in the locked TTE.
	 */
	kernmap = (caddr_t)kernelend;
	kernelend += KERNMAP_SIZE*sizeof(struct pte);

	/*
	 * Initialize kernel page table.
	 */
	memset(kernmap, 0, KERNMAP_SIZE*sizeof(struct pte));
	for (i = 0; i < STSZ; i++) {
		pmap_kernel()->pm_ptbl[i] = (u_int *)(kernmap + i*NBPG);
	}
	ctxbusy[0] = ctxbusy[1] = pmap_kernel();

	/*
	 * Announce page-size to the VM-system
	 */
	uvmexp.pagesize = NBPG;
	uvm_setpagesize();

	/*
	 * Get memory.
	 */
	mem_regions(&mem, &avail);
	for (mp = mem; mp->size; mp++) {
		physmem += btoc(mp->size);
		printf("+%lx,",mp->size);
	}
	printf("\n");
	ppc4xx_tlb_init();
	/*
	 * Count the number of available entries.
	 */
	for (cnt = 0, mp = avail; mp->size; mp++)
		cnt++;

	/*
	 * Page align all regions.
	 * Non-page aligned memory isn't very interesting to us.
	 * Also, sort the entries for ascending addresses.
	 */
	kernelstart &= ~PGOFSET;
	kernelend = (kernelend + PGOFSET) & ~PGOFSET;
	for (mp = avail; mp->size; mp++) {
		s = mp->start;
		e = mp->start + mp->size;
		printf("%08x-%08x -> ",s,e);
		/*
		 * Check whether this region holds all of the kernel.
		 */
		if (s < kernelstart && e > kernelend) {
			avail[cnt].start = kernelend;
			avail[cnt++].size = e - kernelend;
			e = kernelstart;
		}
		/*
		 * Look whether this region starts within the kernel.
		 */
		if (s >= kernelstart && s < kernelend) {
			if (e <= kernelend)
				goto empty;
			s = kernelend;
		}
		/*
		 * Now look whether this region ends within the kernel.
		 */
		if (e > kernelstart && e <= kernelend) {
			if (s >= kernelstart)
				goto empty;
			e = kernelstart;
		}
		/*
		 * Now page align the start and size of the region.
		 */
		s = round_page(s);
		e = trunc_page(e);
		if (e < s)
			e = s;
		sz = e - s;
		printf("%08x-%08x = %x\n",s,e,sz);
		/*
		 * Check whether some memory is left here.
		 */
		if (sz == 0) {
		empty:
			memmove(mp, mp + 1,
			    (cnt - (mp - avail)) * sizeof *mp);
			cnt--;
			mp--;
			continue;
		}
		/*
		 * Do an insertion sort.
		 */
		npgs += btoc(sz);
		for (mp1 = avail; mp1 < mp; mp1++)
			if (s < mp1->start)
				break;
		if (mp1 < mp) {
			memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1);
			mp1->start = s;
			mp1->size = sz;
		} else {
			mp->start = s;
			mp->size = sz;
		}
	}

	/*
	 * We cannot do pmap_steal_memory here,
	 * since we don't run with translation enabled yet.
	 */
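	/*
	 * Carve the message buffer out of the top of the last (i.e.
	 * highest-addressed, since avail[] was just sorted) region that
	 * is large enough to hold it, and drop the region if that
	 * empties it.
	 */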
#ifndef MSGBUFADDR
	/*
	 * allow for msgbuf
	 */
	sz = round_page(MSGBUFSIZE);
	mp = NULL;
	for (mp1 = avail; mp1->size; mp1++)
		if (mp1->size >= sz)
			mp = mp1;
	if (mp == NULL)
		panic("not enough memory?");

	npgs -= btoc(sz);
	msgbuf_paddr = mp->start + mp->size - sz;
	mp->size -= sz;
	if (mp->size <= 0)
		memmove(mp, mp + 1, (cnt - (mp - avail)) * sizeof *mp);
#endif

	printf("Loading pages\n");
	for (mp = avail; mp->size; mp++)
		uvm_page_physload(atop(mp->start), atop(mp->start + mp->size),
		    atop(mp->start), atop(mp->start + mp->size),
		    VM_FREELIST_DEFAULT);

	/*
	 * Initialize kernel pmap and hardware.
	 */
	/* Set up the TLB PID allocator so it knows we are already using PID 1 */
	pmap_kernel()->pm_ctx = KERNEL_PID;
	nextavail = avail->start;


	evcnt_attach_static(&tlbhit_ev);
	evcnt_attach_static(&tlbmiss_ev);
	evcnt_attach_static(&tlbflush_ev);
	evcnt_attach_static(&tlbenter_ev);
	printf("Done\n");
}

/*
 * Restrict given range to physical memory
 *
 * (Used by /dev/mem)
 */
void
pmap_real_memory(paddr_t *start, psize_t *size)
{
	struct mem_region *mp;

	for (mp = mem; mp->size; mp++) {
		if (*start + *size > mp->start &&
		    *start < mp->start + mp->size) {
			if (*start < mp->start) {
				*size -= mp->start - *start;
				*start = mp->start;
			}
			if (*start + *size > mp->start + mp->size)
				*size = mp->start + mp->size - *start;
			return;
		}
	}
	*size = 0;
}

/*
 * Initialize anything else for pmap handling.
 * Called during vm_init().
 */
void
pmap_init(void)
{
	struct pv_entry *pv;
	vsize_t sz;
	vaddr_t addr;
	int i, s;
	int bank;
	char *attr;

	sz = (vsize_t)((sizeof(struct pv_entry) + 1) * npgs);
	sz = round_page(sz);
	addr = uvm_km_zalloc(kernel_map, sz);
	s = splvm();
	pv = pv_table = (struct pv_entry *)addr;
	for (i = npgs; --i >= 0;)
		pv++->pv_pm = NULL;
	pmap_attrib = (char *)pv;
	memset(pv, 0, npgs);

	pv = pv_table;
	attr = pmap_attrib;
	for (bank = 0; bank < vm_nphysseg; bank++) {
		sz = vm_physmem[bank].end - vm_physmem[bank].start;
		vm_physmem[bank].pmseg.pvent = pv;
		vm_physmem[bank].pmseg.attrs = attr;
		pv += sz;
		attr += sz;
	}

	pmap_initialized = 1;
	splx(s);

	/* Setup a pool for additional pvlist structures */
	pool_init(&pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pv_entry", 0,
	    NULL, NULL, 0);
}

/*
 * How much virtual space is available to the kernel?
 */
void
pmap_virtual_space(vaddr_t *start, vaddr_t *end)
{

#if 0
	/*
	 * Reserve one segment for kernel virtual memory
	 */
	*start = (vaddr_t)(KERNEL_SR << ADDR_SR_SHFT);
	*end = *start + SEGMENT_LENGTH;
#else
	*start = (vaddr_t) VM_MIN_KERNEL_ADDRESS;
	*end = (vaddr_t) VM_MAX_KERNEL_ADDRESS;
#endif
}

/*
 * Create and return a physical map.
 */
struct pmap *
pmap_create(void)
{
	struct pmap *pm;

	pm = (struct pmap *)malloc(sizeof *pm, M_VMPMAP, M_WAITOK);
	memset((caddr_t)pm, 0, sizeof *pm);
	pmap_pinit(pm);
	return pm;
}
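
/*
 * Note that pmap_create()/pmap_pinit() allocate nothing up front:
 * page-table pages appear on demand in pte_enter(), and an MMU
 * context (TLB PID) is assigned separately via ctx_alloc() (see
 * e.g. pmap_procwr()) once the pmap is actually used.
 */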

/*
 * Initialize a preallocated and zeroed pmap structure.
 */
void
pmap_pinit(struct pmap *pm)
{
	int i;

	/*
	 * The 4xx has no segment registers to allocate; just set the
	 * reference count and clear the page table pointers (page-table
	 * pages are allocated lazily by pte_enter()).
	 */
	pm->pm_refs = 1;
	for (i = 0; i < STSZ; i++)
		pm->pm_ptbl[i] = NULL;
}

/*
 * Add a reference to the given pmap.
 */
void
pmap_reference(struct pmap *pm)
{

	pm->pm_refs++;
}

/*
 * Retire the given pmap from service.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_destroy(struct pmap *pm)
{

	if (--pm->pm_refs == 0) {
		pmap_release(pm);
		free((caddr_t)pm, M_VMPMAP);
	}
}

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 */
static void
pmap_release(struct pmap *pm)
{
	int i;

	for (i = 0; i < STSZ; i++)
		if (pm->pm_ptbl[i]) {
			uvm_km_free(kernel_map, (vaddr_t)pm->pm_ptbl[i], NBPG);
			pm->pm_ptbl[i] = NULL;
		}
	if (pm->pm_ctx) ctx_free(pm);
}

/*
 * Copy the range specified by src_addr/len
 * from the source map to the range dst_addr/len
 * in the destination map.
 *
 * This routine is only advisory and need not do anything.
 */
void
pmap_copy(struct pmap *dst_pmap, struct pmap *src_pmap, vaddr_t dst_addr,
	vsize_t len, vaddr_t src_addr)
{
}

/*
 * Require that all active physical maps contain no
 * incorrect entries NOW.
 */
void
pmap_update(void)
{
}

/*
 * Garbage collects the physical map system for
 * pages which are no longer used.
 * Success need not be guaranteed -- that is, there
 * may well be pages which are not referenced, but
 * others may be collected.
 * Called by the pageout daemon when pages are scarce.
 */
void
pmap_collect(struct pmap *pm)
{
}

/*
 * Fill the given physical page with zeroes.
 */
void
pmap_zero_page(paddr_t pa)
{

#ifdef NOCACHE
	memset((caddr_t)pa, 0, NBPG);
#else
	int i;

	for (i = NBPG/CACHELINESIZE; i > 0; i--) {
		__asm __volatile ("dcbz 0,%0" :: "r"(pa));
		pa += CACHELINESIZE;
	}
#endif
}

/*
 * Copy the given physical source page to its destination.
 */
void
pmap_copy_page(paddr_t src, paddr_t dst)
{

	memcpy((caddr_t)dst, (caddr_t)src, NBPG);
	dcache_flush_page(dst);
}

/*
 * This returns whether this is the first mapping of a page.
 */
static inline int
pmap_enter_pv(struct pmap *pm, vaddr_t va, paddr_t pa)
{
	struct pv_entry *pv, *npv = NULL;
	int s;

	if (!pmap_initialized)
		return 0;

	s = splvm();

	pv = pa_to_pv(pa);
	if (!pv->pv_pm) {
		/*
		 * No entries yet, use header as the first entry.
		 */
		pv->pv_va = va;
		pv->pv_pm = pm;
		pv->pv_next = NULL;
	} else {
		/*
		 * There is at least one other VA mapping this page.
		 * Place this entry after the header.
		 */
		npv = pool_get(&pv_pool, PR_WAITOK);
		if (!npv) return (0);
		npv->pv_va = va;
		npv->pv_pm = pm;
		npv->pv_next = pv->pv_next;
		pv->pv_next = npv;
	}
	splx(s);
	return (1);
}
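
/*
 * PV list convention: the head pv_entry for a managed page lives in
 * pv_table[] (an empty head has pv_pm == NULL), and any additional
 * mappings hang off it in pv_entry structures taken from pv_pool.
 * The PV_WIRED flag is kept in the low bit of pv_va; PV_CMPVA()
 * masks it off when comparing addresses.
 */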
static void
pmap_remove_pv(struct pmap *pm, vaddr_t va, paddr_t pa)
{
	struct pv_entry *pv, *npv;

	/*
	 * Remove from the PV table.
	 */
	pv = pa_to_pv(pa);
	if (!pv) return;

	/*
	 * If it is the first entry on the list, it is actually
	 * in the header and we must copy the following entry up
	 * to the header.  Otherwise we must search the list for
	 * the entry.  In either case we free the now unused entry.
	 */
	if (pm == pv->pv_pm && PV_CMPVA(va, pv)) {
		if ((npv = pv->pv_next)) {
			*pv = *npv;
			pool_put(&pv_pool, npv);
		} else
			pv->pv_pm = NULL;
	} else {
		for (; (npv = pv->pv_next) != NULL; pv = npv)
			if (pm == npv->pv_pm && PV_CMPVA(va, npv))
				break;
		if (npv) {
			pv->pv_next = npv->pv_next;
			pool_put(&pv_pool, npv);
		}
	}
}

/*
 * Insert physical page at pa into the given pmap at virtual address va.
 */
int
pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
{
	int s;
	u_int tte;
	int managed;

	/*
	 * Have to remove any existing mapping first.
	 */
	pmap_remove(pm, va, va + NBPG);

	if (flags & PMAP_WIRED) flags |= prot;

	/* If it has no protections don't bother w/the rest */
	if (!(flags & VM_PROT_ALL))
		return (0);

	managed = 0;
	if (vm_physseg_find(atop(pa), NULL) != -1)
		managed = 1;

	/*
	 * Generate TTE.
	 *
	 * XXXX
	 *
	 * Since the kernel does not handle execution privileges properly,
	 * we will handle read and execute permissions together.
	 */
	tte = TTE_PA(pa) | TTE_EX;
	/* XXXX -- need to support multiple page sizes. */
	tte |= TTE_SZ_16K;
#ifdef DIAGNOSTIC
	if ((flags & (PME_NOCACHE | PME_WRITETHROUG)) ==
	    (PME_NOCACHE | PME_WRITETHROUG))
		panic("pmap_enter: uncached & writethrough\n");
#endif
	if (flags & PME_NOCACHE)
		/* Must be I/O mapping */
		tte |= TTE_I | TTE_G;
#ifdef NOCACHE
	tte |= TTE_I;
#else
	else if (flags & PME_WRITETHROUG)
		/* Uncached and writethrough are not compatible */
		tte |= TTE_W;
#endif
	if (pm == pmap_kernel())
		tte |= TTE_ZONE(ZONE_PRIV);
	else
		tte |= TTE_ZONE(ZONE_USER);

	if (flags & VM_PROT_WRITE)
		tte |= TTE_WR;

	/*
	 * Now record mapping for later back-translation.
	 */
	if (pmap_initialized && managed) {
		char *attr;

		if (!pmap_enter_pv(pm, va, pa)) {
			/* Could not enter pv on a managed page */
			return 1;
		}

		/* Now set attributes. */
		attr = pa_to_attr(pa);
#ifdef DIAGNOSTIC
		if (!attr)
			panic("managed but no attr\n");
#endif
		if (flags & VM_PROT_ALL)
			*attr |= PTE_HI_REF;
		if (flags & VM_PROT_WRITE)
			*attr |= PTE_HI_CHG;
	}

	s = splvm();
	pm->pm_stats.resident_count++;

	/* Insert page into page table. */
	pte_enter(pm, va, tte);

	/* If this is a real fault, enter it in the tlb */
	if (tte && ((flags & PMAP_WIRED) == 0)) {
		ppc4xx_tlb_enter(pm->pm_ctx, va, tte);
	}
	splx(s);
	return 0;
}
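
/*
 * Wiring is only recorded in the PV entries (the PV_WIRED bit in
 * pv_va); no wired count is maintained here, so pmap_unwire() below
 * just clears that bit on the matching entry.
 */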
void
pmap_unwire(struct pmap *pm, vaddr_t va)
{
	struct pv_entry *pv, *npv;
	paddr_t pa;
	int s = splvm();

	if (pm == NULL) {
		splx(s);
		return;
	}

	if (!pmap_extract(pm, va, &pa)) {
		splx(s);
		return;
	}

	va |= PV_WIRED;

	pv = pa_to_pv(pa);
	if (!pv) {
		splx(s);
		return;
	}

	/*
	 * Search the PV list for the entry that matches this mapping
	 * and clear its wired bit.
	 */
	for (npv = pv; (npv = pv->pv_next) != NULL; pv = npv) {
		if (pm == npv->pv_pm && PV_CMPVA(va, npv)) {
			npv->pv_va &= ~PV_WIRED;
			break;
		}
	}
	splx(s);
}

void
pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
{
	int s;
	u_int tte;
	struct pmap *pm = pmap_kernel();

	/*
	 * Have to remove any existing mapping first.
	 */

	/*
	 * Generate TTE.
	 *
	 * XXXX
	 *
	 * Since the kernel does not handle execution privileges properly,
	 * we will handle read and execute permissions together.
	 */
	tte = 0;
	if (prot & VM_PROT_ALL) {

		tte = TTE_PA(pa) | TTE_EX | TTE_ZONE(ZONE_PRIV);
		/* XXXX -- need to support multiple page sizes. */
		tte |= TTE_SZ_16K;
#ifdef DIAGNOSTIC
		if ((prot & (PME_NOCACHE | PME_WRITETHROUG)) ==
		    (PME_NOCACHE | PME_WRITETHROUG))
			panic("pmap_kenter_pa: uncached & writethrough\n");
#endif
		if (prot & PME_NOCACHE)
			/* Must be I/O mapping */
			tte |= TTE_I | TTE_G;
#ifdef NOCACHE
		tte |= TTE_I;
#else
		else if (prot & PME_WRITETHROUG)
			/* Uncached and writethrough are not compatible */
			tte |= TTE_W;
#endif
		if (prot & VM_PROT_WRITE)
			tte |= TTE_WR;
	}

	s = splvm();
	pm->pm_stats.resident_count++;

	/* Insert page into page table. */
	pte_enter(pm, va, tte);
	splx(s);
}

void
pmap_kremove(vaddr_t va, vsize_t len)
{

	while (len > 0) {
		pte_enter(pmap_kernel(), va, 0);
		va += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
}

/*
 * Remove the given range of mapping entries.
 */
void
pmap_remove(struct pmap *pm, vaddr_t va, vaddr_t endva)
{
	int s;
	paddr_t pa;
	volatile u_int *ptp;

	s = splvm();
	while (va < endva) {

		if ((ptp = pte_find(pm, va)) && (pa = *ptp)) {
			pa = TTE_PA(pa);
			pmap_remove_pv(pm, va, pa);
			*ptp = 0;
			ppc4xx_tlb_flush(va, pm->pm_ctx);
			pm->pm_stats.resident_count--;
		}
		va += NBPG;
	}

	splx(s);
}

/*
 * Get the physical page address for the given pmap/virtual address.
 */
boolean_t
pmap_extract(struct pmap *pm, vaddr_t va, paddr_t *pap)
{
	int seg = STIDX(va);
	int ptn = PTIDX(va);
	u_int pa = 0;
	int s = splvm();

	if (pm->pm_ptbl[seg] && (pa = pm->pm_ptbl[seg][ptn])) {
		*pap = TTE_PA(pa) | (va & PGOFSET);
	}
	splx(s);
	return (pa != 0);
}

/*
 * Lower the protection on the specified range of this pmap.
 *
 * There are only two cases: either the protection is going to 0,
 * or it is going to read-only.
 */
void
pmap_protect(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
	volatile u_int *ptp;
	int s;

	if (prot & VM_PROT_READ) {
		s = splvm();
		while (sva < eva) {
			if ((ptp = pte_find(pm, sva)) != NULL) {
				*ptp &= ~TTE_WR;
				ppc4xx_tlb_flush(sva, pm->pm_ctx);
			}
			sva += NBPG;
		}
		splx(s);
		return;
	}
	pmap_remove(pm, sva, eva);
}
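
/*
 * check_attr() tests (and optionally clears) bits in the cached
 * referenced/modified attributes for a page; the pmap_is_*() and
 * pmap_clear_*() operations used below are presumably thin wrappers
 * around it.
 */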
boolean_t
check_attr(struct vm_page *pg, u_int mask, int clear)
{
	paddr_t pa = VM_PAGE_TO_PHYS(pg);
	int s;
	char *attr;
	int rv;

	/*
	 * First modify bits in cache.
	 */
	s = splvm();
	attr = pa_to_attr(pa);
	if (attr == NULL) {
		splx(s);
		return FALSE;
	}

	rv = ((*attr & mask) != 0);
	if (clear)
		*attr &= ~mask;

	splx(s);
	return rv;
}


/*
 * Lower the protection on the specified physical page.
 *
 * There are only two cases: either the protection is going to 0,
 * or it is going to read-only.
 */
void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	paddr_t pa = VM_PAGE_TO_PHYS(pg);
	vaddr_t va;
	struct pv_entry *pvh, *pv, *npv;
	struct pmap *pm;

	pvh = pa_to_pv(pa);
	if (pvh == NULL)
		return;

	/* Handle extra pvs which may be deleted in the operation */
	for (pv = pvh->pv_next; pv; pv = npv) {
		npv = pv->pv_next;

		pm = pv->pv_pm;
		va = pv->pv_va;
		pmap_protect(pm, va, va+NBPG, prot);
	}
	/* Now check the head pv */
	if (pvh->pv_pm) {
		pv = pvh;
		pm = pv->pv_pm;
		va = pv->pv_va;
		pmap_protect(pm, va, va+NBPG, prot);
	}
}

/*
 * Activate the address space for the specified process.  If the process
 * is the current process, load the new MMU context.
 */
void
pmap_activate(struct proc *p)
{
#if 0
	struct pcb *pcb = &p->p_addr->u_pcb;
	pmap_t pmap = p->p_vmspace->vm_map.pmap;

	/*
	 * XXX Normally performed in cpu_fork().
	 */
	printf("pmap_activate(%p), pmap=%p\n",p,pmap);
	if (pcb->pcb_pm != pmap) {
		pcb->pcb_pm = pmap;
		(void) pmap_extract(pmap_kernel(), (vaddr_t)pcb->pcb_pm,
		    (paddr_t *)&pcb->pcb_pmreal);
	}

	if (p == curproc) {
		/* Store pointer to new current pmap. */
		curpm = pcb->pcb_pmreal;
	}
#endif
}

/*
 * Deactivate the specified process's address space.
 */
void
pmap_deactivate(struct proc *p)
{
}

/*
 * Synchronize caches corresponding to [addr, addr+len) in p.
 */
void
pmap_procwr(struct proc *p, vaddr_t va, size_t len)
{
	struct pmap *pm = p->p_vmspace->vm_map.pmap;
	int msr, ctx, opid;


	/*
	 * Need to turn off IMMU and switch to user context.
	 * (icbi uses DMMU).
	 */
	if (!(ctx = pm->pm_ctx)) {
		/* No context -- assign it one */
		ctx_alloc(pm);
		ctx = pm->pm_ctx;
	}
	__asm __volatile("mfmsr %0;"
		"li %1, 0x20;"
		"andc %1,%0,%1;"
		"mtmsr %1;"
		"sync;isync;"
		"mfpid %1;"
		"mtpid %2;"
		"sync; isync;"
		"1:"
		"dcbf 0,%3;"
		"icbi 0,%3;"
		"addi %3,%3,32;"
		"addic. %4,%4,-32;"
		"bge 1b;"
		"mtpid %1;"
		"mtmsr %0;"
		"sync; isync"
		: "=&r" (msr), "=&r" (opid)
		: "r" (ctx), "r" (va), "r" (len));
}
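
/*
 * Software TLB management: tlb_info[] shadows the hardware TLB
 * (context, VA and flags per entry).  Entries below TLB_NRESERVED
 * are pinned for the kernel and never replaced; the rest are
 * recycled by ppc4xx_tlb_find_victim() using the pseudo-FIFO
 * pointer tlbnext.
 */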

/* This has to be done in real mode !!! */
void
ppc4xx_tlb_flush(vaddr_t va, int pid)
{
	u_long i, found;
	u_long msr;

	/* If there's no context then it can't be mapped. */
	if (!pid) return;

	asm("mfpid %1;"			/* Save PID */
		"mfmsr %2;"		/* Save MSR */
		"li %0,0;"		/* Now clear MSR */
		"mtmsr %0;"
		"mtpid %4;"		/* Set PID */
		"sync;"
		"tlbsx. %0,0,%3;"	/* Search TLB */
		"sync;"
		"mtpid %1;"		/* Restore PID */
		"mtmsr %2;"		/* Restore MSR */
		"sync;isync;"
		"li %1,1;"
		"beq 1f;"
		"li %1,0;"
		"1:"
		: "=&r" (i), "=&r" (found), "=&r" (msr)
		: "r" (va), "r" (pid));
	if (found && !TLB_LOCKED(i)) {

		/* Now flush translation */
		asm volatile(
			"tlbwe %0,%1,0;"
			"sync;isync;"
			: : "r" (0), "r" (i));

		tlb_info[i].ti_ctx = 0;
		tlb_info[i].ti_flags = 0;
		tlbnext = i;
		/* Successful flushes */
		tlbflush_ev.ev_count++;
	}
}

void
ppc4xx_tlb_flush_all(void)
{
	u_long i;

	for (i = 0; i < NTLB; i++)
		if (!TLB_LOCKED(i)) {
			asm volatile(
				"tlbwe %0,%1,0;"
				"sync;isync;"
				: : "r" (0), "r" (i));
			tlb_info[i].ti_ctx = 0;
			tlb_info[i].ti_flags = 0;
		}

	asm volatile("sync;isync");
}

/* Find a TLB entry to evict. */
static int
ppc4xx_tlb_find_victim(void)
{
	int flags;

	for (;;) {
		if (++tlbnext >= NTLB)
			tlbnext = TLB_NRESERVED;
		flags = tlb_info[tlbnext].ti_flags;
		if (!(flags & TLBF_USED) ||
		    (flags & (TLBF_LOCKED | TLBF_REF)) == 0) {
			u_long va, stack = (u_long)&va;

			if (!((tlb_info[tlbnext].ti_va ^ stack) & (~PGOFSET)) &&
			    (tlb_info[tlbnext].ti_ctx == KERNEL_PID) &&
			    (flags & TLBF_USED)) {
				/* Kernel stack page */
				flags |= TLBF_USED;
				tlb_info[tlbnext].ti_flags = flags;
			} else {
				/* Found it! */
				return (tlbnext);
			}
		} else {
			tlb_info[tlbnext].ti_flags = (flags & ~TLBF_REF);
		}
	}
}
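
/*
 * Enter a translation into the hardware TLB: pick a victim slot,
 * record it in tlb_info[], then (with translation off) write the
 * new entry with tlbwe under the target PID and restore the old
 * PID and MSR.
 */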
void
ppc4xx_tlb_enter(int ctx, vaddr_t va, u_int pte)
{
	u_long th, tl, idx;
	tlbpid_t pid;
	u_short msr;
	int s;

	tlbenter_ev.ev_count++;

	th = (va & TLB_EPN_MASK) |
	    (((pte & TTE_SZ_MASK) >> TTE_SZ_SHIFT) << TLB_SIZE_SHFT) |
	    TLB_VALID;
	tl = pte;

	s = splhigh();
	idx = ppc4xx_tlb_find_victim();

#ifdef DIAGNOSTIC
	if ((idx < TLB_NRESERVED) || (idx >= NTLB)) {
		panic("ppc4xx_tlb_enter: replacing entry %ld\n", idx);
	}
#endif

	tlb_info[idx].ti_va = (va & TLB_EPN_MASK);
	tlb_info[idx].ti_ctx = ctx;
	tlb_info[idx].ti_flags = TLBF_USED | TLBF_REF;

	asm volatile(
		"mfmsr %0;"			/* Save MSR */
		"li %1,0;"
		"tlbwe %1,%3,0;"		/* Invalidate old entry. */
		"mtmsr %1;"			/* Clear MSR */
		"mfpid %1;"			/* Save old PID */
		"mtpid %2;"			/* Load translation ctx */
		"sync; isync;"
#ifdef DEBUG
		"andi. %3,%3,63;"
		"tweqi %3,0;"			/* XXXXX DEBUG trap on index 0 */
#endif
		"tlbwe %4,%3,1; tlbwe %5,%3,0;"	/* Set TLB */
		"sync; isync;"
		"mtpid %1; mtmsr %0;"		/* Restore PID and MSR */
		"sync; isync;"
		: "=&r" (msr), "=&r" (pid)
		: "r" (ctx), "r" (idx), "r" (tl), "r" (th));
	splx(s);
}

void
ppc4xx_tlb_unpin(int i)
{

	if (i == -1)
		for (i = 0; i < TLB_NRESERVED; i++)
			tlb_info[i].ti_flags &= ~TLBF_LOCKED;
	else
		tlb_info[i].ti_flags &= ~TLBF_LOCKED;
}

void
ppc4xx_tlb_init(void)
{
	int i;

	/* Mark reserved TLB entries */
	for (i = 0; i < TLB_NRESERVED; i++) {
		tlb_info[i].ti_flags = TLBF_LOCKED | TLBF_USED;
		tlb_info[i].ti_ctx = KERNEL_PID;
	}

	/* Setup security zones */
	/* Z0 - accessible by kernel only if TLB entry permissions allow
	 * Z1,Z2 - access is controlled by TLB entry permissions
	 * Z3 - full access regardless of TLB entry permissions
	 */

	asm volatile(
		"mtspr %0,%1;"
		"sync;"
		:: "K"(SPR_ZPR), "r" (0x1b000000));
}


/*
 * We should pass the ctx in from trap code.
 */
int
pmap_tlbmiss(vaddr_t va, int ctx)
{
	volatile u_int *pte;
	u_long tte;

	tlbmiss_ev.ev_count++;

	/*
	 * XXXX We will reserve 0-0x80000000 for va==pa mappings.
	 */
	if (ctx != KERNEL_PID || (va & 0x80000000)) {
		pte = pte_find((struct pmap *)ctxbusy[ctx], va);
		if (pte == NULL) {
			/* Map unmanaged addresses directly for kernel access */
			return 1;
		}
		tte = *pte;
		if (tte == 0) {
			return 1;
		}
	} else {
		/* Create a 16MB writeable mapping. */
#ifdef NOCACHE
		tte = TTE_PA(va) | TTE_ZONE(ZONE_PRIV) | TTE_SZ_16M | TTE_I | TTE_WR;
#else
		tte = TTE_PA(va) | TTE_ZONE(ZONE_PRIV) | TTE_SZ_16M | TTE_WR;
#endif
	}
	tlbhit_ev.ev_count++;
	ppc4xx_tlb_enter(ctx, va, tte);

	return 0;
}
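
/*
 * MMU context (TLB PID) management: ctxbusy[] maps context numbers to
 * their owning pmaps, with contexts 0 and 1 reserved for the kernel
 * (hence MINCTX == 2).  ctx_alloc() hands out contexts round-robin,
 * stealing one if necessary, and ctx_flush() evicts a context's
 * entries from the TLB.
 */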

/*
 * Flush all the entries matching a context from the TLB.
 */
static int
ctx_flush(int cnum)
{
	int i;

	/* We gotta steal this context */
	for (i = TLB_NRESERVED; i < NTLB; i++) {
		if (tlb_info[i].ti_ctx == cnum) {
			/* Can't steal ctx if it has a locked entry. */
			if (TLB_LOCKED(i)) {
#ifdef DIAGNOSTIC
				printf("ctx_flush: can't invalidate "
				    "locked mapping %d "
				    "for context %d\n", i, cnum);
				Debugger();
#endif
				return (1);
			}
#ifdef DIAGNOSTIC
			if (i < TLB_NRESERVED)
				panic("TLB entry %d not locked\n", i);
#endif
			/*
			 * Invalidate particular TLB entry regardless of
			 * locked status
			 */
			asm volatile("tlbwe %0,%1,0" : :"r"(0),"r"(i));
			tlb_info[i].ti_flags = 0;
		}
	}
	return (0);
}

/*
 * Allocate a context.  If necessary, steal one from someone else.
 *
 * The new context is flushed from the TLB before returning.
 */
int
ctx_alloc(struct pmap *pm)
{
	int s, cnum;
	static int next = MINCTX;

	if (pm == pmap_kernel()) {
#ifdef DIAGNOSTIC
		printf("ctx_alloc: kernel pmap!\n");
#endif
		return (0);
	}
	s = splvm();

	/* Find a likely context. */
	cnum = next;
	do {
		/* Wrap, keeping cnum a valid ctxbusy[] index and PID. */
		if ((++cnum) >= NUMCTX)
			cnum = MINCTX;
	} while (ctxbusy[cnum] != NULL && cnum != next);

	/* Now clean it out */
oops:
	if (cnum < MINCTX)
		cnum = MINCTX; /* Never steal ctx 0 or 1 */
	if (ctx_flush(cnum)) {
		/* oops -- something's wired. */
		if ((++cnum) >= NUMCTX)
			cnum = MINCTX;
		goto oops;
	}

	if (ctxbusy[cnum]) {
#ifdef DEBUG
		/* We should identify this pmap and clear it */
		printf("Warning: stealing context %d\n", cnum);
#endif
		ctxbusy[cnum]->pm_ctx = 0;
	}
	ctxbusy[cnum] = pm;
	next = cnum;
	splx(s);
	pm->pm_ctx = cnum;

	return cnum;
}

/*
 * Give away a context.
 */
void
ctx_free(struct pmap *pm)
{
	int oldctx;

	oldctx = pm->pm_ctx;

	if (oldctx == 0)
		panic("ctx_free: freeing kernel context");
#ifdef DIAGNOSTIC
	if (ctxbusy[oldctx] == 0)
		printf("ctx_free: freeing free context %d\n", oldctx);
	if (ctxbusy[oldctx] != pm) {
		printf("ctx_free: freeing someone else's context\n "
		    "ctxbusy[%d] = %p, pm->pm_ctx = %p\n",
		    oldctx, (void *)(u_long)ctxbusy[oldctx], pm);
		Debugger();
	}
#endif
	/* We should verify it has not been stolen and reallocated... */
	ctxbusy[oldctx] = NULL;
	ctx_flush(oldctx);
}

#ifdef DEBUG
/*
 * Test ref/modify handling.
 */
void pmap_testout __P((void));
void
pmap_testout()
{
	vaddr_t va;
	volatile int *loc;
	int val = 0;
	paddr_t pa;
	struct vm_page *pg;
	int ref, mod;

	/* Allocate a page */
	va = (vaddr_t)uvm_km_alloc1(kernel_map, NBPG, 1);
	loc = (int*)va;

	pmap_extract(pmap_kernel(), va, &pa);
	pg = PHYS_TO_VM_PAGE(pa);
	pmap_unwire(pmap_kernel(), va);

	pmap_remove(pmap_kernel(), va, va+1);
	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
	pmap_update();

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	    (void *)(u_long)va, (long)pa,
	    ref, mod);

	/* Check it's properly cleared */
	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Checking cleared page: ref %d, mod %d\n",
	    ref, mod);

	/* Reference page */
	val = *loc;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Referenced page: ref %d, mod %d val %x\n",
	    ref, mod, val);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	    (void *)(u_long)va, (long)pa,
	    ref, mod);

	/* Modify page */
	*loc = 1;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Modified page: ref %d, mod %d\n",
	    ref, mod);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	    (void *)(u_long)va, (long)pa,
	    ref, mod);

	/* Check it's properly cleared */
	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Checking cleared page: ref %d, mod %d\n",
	    ref, mod);

	/* Modify page */
	*loc = 1;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
printf("Modified page: ref %d, mod %d\n", 1564 ref, mod); 1565 1566 /* Check pmap_protect() */ 1567 pmap_protect(pmap_kernel(), va, va+1, VM_PROT_READ); 1568 pmap_update(); 1569 ref = pmap_is_referenced(pg); 1570 mod = pmap_is_modified(pg); 1571 printf("pmap_protect(VM_PROT_READ): ref %d, mod %d\n", 1572 ref, mod); 1573 1574 /* Now clear reference and modify */ 1575 ref = pmap_clear_reference(pg); 1576 mod = pmap_clear_modify(pg); 1577 printf("Clearing page va %p pa %lx: ref %d, mod %d\n", 1578 (void *)(u_long)va, (long)pa, 1579 ref, mod); 1580 1581 /* Reference page */ 1582 val = *loc; 1583 1584 ref = pmap_is_referenced(pg); 1585 mod = pmap_is_modified(pg); 1586 printf("Referenced page: ref %d, mod %d val %x\n", 1587 ref, mod, val); 1588 1589 /* Now clear reference and modify */ 1590 ref = pmap_clear_reference(pg); 1591 mod = pmap_clear_modify(pg); 1592 printf("Clearing page va %p pa %lx: ref %d, mod %d\n", 1593 (void *)(u_long)va, (long)pa, 1594 ref, mod); 1595 1596 /* Modify page */ 1597 #if 0 1598 pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0); 1599 pmap_update(); 1600 #endif 1601 *loc = 1; 1602 1603 ref = pmap_is_referenced(pg); 1604 mod = pmap_is_modified(pg); 1605 printf("Modified page: ref %d, mod %d\n", 1606 ref, mod); 1607 1608 /* Check pmap_protect() */ 1609 pmap_protect(pmap_kernel(), va, va+1, VM_PROT_NONE); 1610 pmap_update(); 1611 ref = pmap_is_referenced(pg); 1612 mod = pmap_is_modified(pg); 1613 printf("pmap_protect(): ref %d, mod %d\n", 1614 ref, mod); 1615 1616 /* Now clear reference and modify */ 1617 ref = pmap_clear_reference(pg); 1618 mod = pmap_clear_modify(pg); 1619 printf("Clearing page va %p pa %lx: ref %d, mod %d\n", 1620 (void *)(u_long)va, (long)pa, 1621 ref, mod); 1622 1623 /* Reference page */ 1624 val = *loc; 1625 1626 ref = pmap_is_referenced(pg); 1627 mod = pmap_is_modified(pg); 1628 printf("Referenced page: ref %d, mod %d val %x\n", 1629 ref, mod, val); 1630 1631 /* Now clear reference and modify */ 1632 ref = pmap_clear_reference(pg); 1633 mod = pmap_clear_modify(pg); 1634 printf("Clearing page va %p pa %lx: ref %d, mod %d\n", 1635 (void *)(u_long)va, (long)pa, 1636 ref, mod); 1637 1638 /* Modify page */ 1639 #if 0 1640 pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0); 1641 pmap_update(); 1642 #endif 1643 *loc = 1; 1644 1645 ref = pmap_is_referenced(pg); 1646 mod = pmap_is_modified(pg); 1647 printf("Modified page: ref %d, mod %d\n", 1648 ref, mod); 1649 1650 /* Check pmap_pag_protect() */ 1651 pmap_page_protect(pg, VM_PROT_READ); 1652 ref = pmap_is_referenced(pg); 1653 mod = pmap_is_modified(pg); 1654 printf("pmap_page_protect(VM_PROT_READ): ref %d, mod %d\n", 1655 ref, mod); 1656 1657 /* Now clear reference and modify */ 1658 ref = pmap_clear_reference(pg); 1659 mod = pmap_clear_modify(pg); 1660 printf("Clearing page va %p pa %lx: ref %d, mod %d\n", 1661 (void *)(u_long)va, (long)pa, 1662 ref, mod); 1663 1664 /* Reference page */ 1665 val = *loc; 1666 1667 ref = pmap_is_referenced(pg); 1668 mod = pmap_is_modified(pg); 1669 printf("Referenced page: ref %d, mod %d val %x\n", 1670 ref, mod, val); 1671 1672 /* Now clear reference and modify */ 1673 ref = pmap_clear_reference(pg); 1674 mod = pmap_clear_modify(pg); 1675 printf("Clearing page va %p pa %lx: ref %d, mod %d\n", 1676 (void *)(u_long)va, (long)pa, 1677 ref, mod); 1678 1679 /* Modify page */ 1680 #if 0 1681 pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0); 1682 pmap_update(); 1683 #endif 1684 *loc = 1; 1685 1686 ref = pmap_is_referenced(pg); 1687 mod = pmap_is_modified(pg); 1688 
printf("Modified page: ref %d, mod %d\n", 1689 ref, mod); 1690 1691 /* Check pmap_pag_protect() */ 1692 pmap_page_protect(pg, VM_PROT_NONE); 1693 ref = pmap_is_referenced(pg); 1694 mod = pmap_is_modified(pg); 1695 printf("pmap_page_protect(): ref %d, mod %d\n", 1696 ref, mod); 1697 1698 /* Now clear reference and modify */ 1699 ref = pmap_clear_reference(pg); 1700 mod = pmap_clear_modify(pg); 1701 printf("Clearing page va %p pa %lx: ref %d, mod %d\n", 1702 (void *)(u_long)va, (long)pa, 1703 ref, mod); 1704 1705 1706 /* Reference page */ 1707 val = *loc; 1708 1709 ref = pmap_is_referenced(pg); 1710 mod = pmap_is_modified(pg); 1711 printf("Referenced page: ref %d, mod %d val %x\n", 1712 ref, mod, val); 1713 1714 /* Now clear reference and modify */ 1715 ref = pmap_clear_reference(pg); 1716 mod = pmap_clear_modify(pg); 1717 printf("Clearing page va %p pa %lx: ref %d, mod %d\n", 1718 (void *)(u_long)va, (long)pa, 1719 ref, mod); 1720 1721 /* Modify page */ 1722 #if 0 1723 pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0); 1724 pmap_update(); 1725 #endif 1726 *loc = 1; 1727 1728 ref = pmap_is_referenced(pg); 1729 mod = pmap_is_modified(pg); 1730 printf("Modified page: ref %d, mod %d\n", 1731 ref, mod); 1732 1733 /* Unmap page */ 1734 pmap_remove(pmap_kernel(), va, va+1); 1735 pmap_update(); 1736 ref = pmap_is_referenced(pg); 1737 mod = pmap_is_modified(pg); 1738 printf("Unmapped page: ref %d, mod %d\n", ref, mod); 1739 1740 /* Now clear reference and modify */ 1741 ref = pmap_clear_reference(pg); 1742 mod = pmap_clear_modify(pg); 1743 printf("Clearing page va %p pa %lx: ref %d, mod %d\n", 1744 (void *)(u_long)va, (long)pa, ref, mod); 1745 1746 /* Check it's properly cleared */ 1747 ref = pmap_is_referenced(pg); 1748 mod = pmap_is_modified(pg); 1749 printf("Checking cleared page: ref %d, mod %d\n", 1750 ref, mod); 1751 1752 pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 1753 VM_PROT_ALL|PMAP_WIRED); 1754 uvm_km_free(kernel_map, (vaddr_t)va, NBPG); 1755 } 1756 #endif 1757