/*
 * Copyright (c) 1992 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and Ralph Campbell.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)pmap.c	7.2 (Berkeley) 02/19/92
 */

/*
 * Manages physical address maps.
 *
 * In addition to hardware address maps, this
 * module is called upon to provide software-use-only
 * maps which may or may not be stored in the same
 * form as hardware maps.  These pseudo-maps are
 * used to store intermediate results from copy
 * operations to and from address spaces.
 *
 * Since the information managed by this module is
 * also stored by the logical address mapping module,
 * this module may throw away valid virtual-to-physical
 * mappings at almost any time.  However, invalidations
 * of virtual-to-physical mappings must be done as
 * requested.
 *
 * In order to cope with hardware architectures which
 * make virtual-to-physical map invalidates expensive,
 * this module may delay invalidate or reduced protection
 * operations until such time as they are actually
 * necessary.  This module is given full information as
 * to which processors are currently using which maps,
 * and to when physical maps must be made correct.
 */

#include "param.h"
#include "proc.h"
#include "malloc.h"
#include "user.h"

#include "vm/vm.h"
#include "vm/vm_kern.h"
#include "vm/vm_page.h"

#include "../include/machConst.h"
#include "pte.h"

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_table.
 * The first entry for a physical page is embedded directly in pv_table;
 * additional mappings are chained through pv_next.
 * XXX really should do this as a part of the higher level code.
 */
typedef struct pv_entry {
	struct pv_entry	*pv_next;	/* next pv_entry on the chain */
	struct pmap	*pv_pmap;	/* pmap where mapping lies */
	vm_offset_t	pv_va;		/* virtual address for mapping */
	int		pv_flags;	/* flags */
} *pv_entry_t;

pv_entry_t	pv_table;	/* array of entries, one per page */
extern void pmap_remove_pv();

/*
 * Map a physical address to its index in pv_table and to the head of
 * its pv chain.  NOTE(review): first_phys_addr is presumably the PA of
 * the first managed page, declared elsewhere -- confirm.
 */
#define pa_index(pa)		atop((pa) - first_phys_addr)
#define pa_to_pvh(pa)		(&pv_table[pa_index(pa)])

#ifdef DEBUG
/* Event counters for pmap_enter(), kept for debugging/tuning only. */
struct {
	int kernel;	/* entering kernel mapping */
	int user;	/* entering user mapping */
	int ptpneeded;	/* needed to allocate a PT page */
	int pwchange;	/* no mapping change, just wiring or protection */
	int wchange;	/* no mapping change, just wiring */
	int mchange;	/* was mapped but mapping to different page */
	int managed;	/* a managed page */
	int firstpv;	/* first mapping for this PA */
	int secondpv;	/* second mapping for this PA */
	int ci;		/* cache inhibited */
	int unmanaged;	/* not a managed page */
	int flushes;	/* cache flushes */
	int cachehit;	/* new entry forced valid entry out */
} enter_stats;
/* Event counters for pmap_remove(), kept for debugging/tuning only. */
struct {
	int calls;
	int removes;
	int flushes;
	int pidflushes;	/* HW pid stolen */
	int pvfirst;
	int pvsearch;
} remove_stats;

/* Debug trace mask; tested throughout against the PDB_* bits below. */
int pmapdebug;
#define PDB_FOLLOW	0x0001
#define PDB_INIT	0x0002
#define PDB_ENTER	0x0004
#define PDB_REMOVE	0x0008
#define PDB_CREATE	0x0010
#define PDB_PTPAGE	0x0020
#define PDB_CACHE	0x0040
#define PDB_BITS	0x0080
#define PDB_COLLECT	0x0100
#define PDB_PROTECT	0x0200
#define PDB_TLBPID	0x0400
#define PDB_PARANOIA	0x2000
#define PDB_WIRING	0x4000
#define PDB_PVDUMP	0x8000

#endif /* DEBUG */

/*
 * Bit mask of hardware TLB PIDs in use.  Initialized to 3 so that
 * PIDs 0 and 1 are pre-reserved (PID 1 is given to proc0's pmap in
 * pmap_pinit below).
 */
u_int	whichpids[2] = {	/* bit mask of hardware PID's in use */
	3, 0
};

struct pmap	kernel_pmap_store;
pmap_t		cur_pmap;	/* current pmap mapped in hardware */

vm_offset_t	avail_start;	/* PA of first available physical page */
vm_offset_t	avail_end;	/* PA of last available physical page */
vm_size_t	mem_size;	/* memory size in bytes */
vm_offset_t	virtual_avail;	/* VA of first avail page (after kernel bss)*/
vm_offset_t	virtual_end;	/* VA of last avail page (end of kernel AS) */
int		pmaxpagesperpage;	/* PAGE_SIZE
					/ NBPG */
#ifdef ATTR
char	*pmap_attributes;	/* reference and modify bits */
#endif
pmap_hash_t zero_pmap_hash;	/* empty TLB hash table for init */

/*
 * Bootstrap the system enough to run with virtual memory.
 *
 * Steals physical memory starting at 'firstaddr' for the kernel TLB
 * hash table, the shared empty (zero) hash table, and pv_table, then
 * publishes avail_start/avail_end/virtual_avail/virtual_end for the
 * VM system.  'firstaddr' is advanced past each stolen region; 'start'
 * remembers the original base so the whole stolen range can be zeroed
 * at the end.
 */
void
pmap_bootstrap(firstaddr)
	vm_offset_t firstaddr;
{
	register int i;
	vm_offset_t start = firstaddr;
	extern int maxmem, physmem;

	/*
	 * Allocate a TLB hash table for the kernel.
	 * This could be a KSEG0 address and thus save TLB entries but
	 * its faster and simpler in assembly language to have a
	 * fixed address that can be accessed with a 16 bit signed offset.
	 * Note: the kernel pm_hash field is null, user pm_hash fields are
	 * either the table or zero_pmap_hash.
	 */
	kernel_pmap_store.pm_hash = (pmap_hash_t)0;
	for (i = 0; i < PMAP_HASH_KPAGES; i++) {
		/*
		 * Wire each hash page into a fixed TLB slot, mapped
		 * valid/dirty/global at PMAP_HASH_KADDR.
		 */
		MachTLBWriteIndexed(i + UPAGES + PMAP_HASH_UPAGES,
			PMAP_HASH_KADDR + (i << PGSHIFT),
			firstaddr | PG_V | PG_M | PG_G);
		firstaddr += NBPG;
	}

	/*
	 * Allocate an empty TLB hash table for initial pmap's.
	 */
	zero_pmap_hash = (pmap_hash_t)firstaddr;
	firstaddr += PMAP_HASH_UPAGES * NBPG;

	/* init proc[0]'s pmap hash table */
	for (i = 0; i < PMAP_HASH_UPAGES; i++) {
		/* Read-only so writes fault and a real table is built. */
		kernel_pmap_store.pm_hash_ptes[i] =
			((u_int)zero_pmap_hash + (i << PGSHIFT)) | PG_V | PG_RO;
		/* PID 1 is proc0's preallocated TLB pid (see pmap_pinit). */
		MachTLBWriteIndexed(i + UPAGES,
			(PMAP_HASH_UADDR + (i << PGSHIFT)) |
			(1 << VMMACH_TLB_PID_SHIFT),
			kernel_pmap_store.pm_hash_ptes[i]);
	}

	/*
	 * Allocate memory for pv_table.
	 * This will allocate more entries than we really need.
	 * We should do this in pmap_init when we know the actual
	 * phys_start and phys_end but its better to use phys addresses
	 * rather than kernel virtual addresses mapped through the TLB.
	 */
	i = (maxmem - pmax_btop(firstaddr)) * sizeof(struct pv_entry);
	i = pmax_round_page(i);
	pv_table = (pv_entry_t)firstaddr;
	firstaddr += i;

	/*
	 * Clear allocated memory.
	 */
	bzero((caddr_t)start, firstaddr - start);

	avail_start = firstaddr;
	avail_end = pmax_ptob(maxmem);
	mem_size = avail_end - avail_start;

	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MIN_KERNEL_ADDRESS + PMAP_HASH_KPAGES * NPTEPG * NBPG;
	/* XXX need to decide how to set cnt.v_page_size */
	pmaxpagesperpage = 1;

	/*
	 * The kernel pmap is the initial current pmap.
	 * NOTE(review): kernel_pmap is presumably a macro for
	 * (&kernel_pmap_store) defined in a header -- confirm.
	 */
	cur_pmap = &kernel_pmap_store;
	simple_lock_init(&kernel_pmap->pm_lock);
	kernel_pmap->pm_count = 1;
}

/*
 * Bootstrap memory allocator. This function allows for early dynamic
 * memory allocation until the virtual memory system has been bootstrapped.
 * After that point, either kmem_alloc or malloc should be used. This
 * function works by stealing pages from the (to be) managed page pool,
 * stealing virtual address space, then mapping the pages and zeroing them.
 *
 * It should be used from pmap_bootstrap till vm_page_startup, afterwards
 * it cannot be used, and will generate a panic if tried. Note that this
 * memory will never be freed, and in essence it is wired down.
21552118Smckusick */ 21652118Smckusick void * 21752118Smckusick pmap_bootstrap_alloc(size) 21852118Smckusick int size; 21952118Smckusick { 22052118Smckusick vm_offset_t val; 22152118Smckusick extern boolean_t vm_page_startup_initialized; 22252118Smckusick 22352118Smckusick if (vm_page_startup_initialized) 22452118Smckusick panic("pmap_bootstrap_alloc: called after startup initialized"); 22552118Smckusick 22652118Smckusick val = avail_start; 22752118Smckusick size = round_page(size); 22852118Smckusick avail_start += size; 22952118Smckusick 23052118Smckusick blkclr((caddr_t) val, size); 23152118Smckusick return ((void *) val); 23252118Smckusick } 23352118Smckusick 23452118Smckusick /* 23552118Smckusick * Initialize the pmap module. 23652118Smckusick * Called by vm_init, to initialize any structures that the pmap 23752118Smckusick * system needs to map virtual memory. 23852118Smckusick */ 23952118Smckusick void 24052118Smckusick pmap_init(phys_start, phys_end) 24152118Smckusick vm_offset_t phys_start, phys_end; 24252118Smckusick { 24352118Smckusick 24452118Smckusick #ifdef DEBUG 24552118Smckusick if (pmapdebug & PDB_FOLLOW) 24652118Smckusick printf("pmap_init(%x, %x)\n", phys_start, phys_end); 24752118Smckusick #endif 24852118Smckusick } 24952118Smckusick 25052118Smckusick /* 25152118Smckusick * Used to map a range of physical addresses into kernel 25252118Smckusick * virtual address space. 25352118Smckusick * 25452118Smckusick * This routine should only be called by vm_page_startup() 25552118Smckusick * with KSEG0 addresses. 
25652118Smckusick */ 25752118Smckusick vm_offset_t 25852118Smckusick pmap_map(virt, start, end, prot) 25952118Smckusick vm_offset_t virt; 26052118Smckusick vm_offset_t start; 26152118Smckusick vm_offset_t end; 26252118Smckusick int prot; 26352118Smckusick { 26452118Smckusick 26552118Smckusick #ifdef DEBUG 26652118Smckusick if (pmapdebug & PDB_FOLLOW) 26752118Smckusick printf("pmap_map(%x, %x, %x, %x)\n", virt, start, end, prot); 26852118Smckusick #endif 26952118Smckusick 27052118Smckusick return(round_page(end)); 27152118Smckusick } 27252118Smckusick 27352118Smckusick /* 27452118Smckusick * Create and return a physical map. 27552118Smckusick * 27652118Smckusick * If the size specified for the map 27752118Smckusick * is zero, the map is an actual physical 27852118Smckusick * map, and may be referenced by the 27952118Smckusick * hardware. 28052118Smckusick * 28152118Smckusick * If the size specified is non-zero, 28252118Smckusick * the map will be used in software only, and 28352118Smckusick * is bounded by that size. 28452118Smckusick */ 28552118Smckusick pmap_t 28652118Smckusick pmap_create(size) 28752118Smckusick vm_size_t size; 28852118Smckusick { 28952118Smckusick register pmap_t pmap; 29052118Smckusick 29152118Smckusick #ifdef DEBUG 29252118Smckusick if (pmapdebug & (PDB_FOLLOW|PDB_CREATE)) 29352118Smckusick printf("pmap_create(%x)\n", size); 29452118Smckusick #endif 29552118Smckusick /* 29652118Smckusick * Software use map does not need a pmap 29752118Smckusick */ 29852118Smckusick if (size) 29952118Smckusick return(NULL); 30052118Smckusick 30152118Smckusick printf("pmap_create(%x) XXX\n", size); /* XXX */ 30252118Smckusick /* XXX: is it ok to wait here? 
*/ 30352118Smckusick pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK); 30452118Smckusick #ifdef notifwewait 30552118Smckusick if (pmap == NULL) 30652118Smckusick panic("pmap_create: cannot allocate a pmap"); 30752118Smckusick #endif 30852118Smckusick bzero(pmap, sizeof(*pmap)); 30952118Smckusick pmap_pinit(pmap); 31052118Smckusick return (pmap); 31152118Smckusick } 31252118Smckusick 31352118Smckusick /* 31452118Smckusick * Initialize a preallocated and zeroed pmap structure, 31552118Smckusick * such as one in a vmspace structure. 31652118Smckusick */ 31752118Smckusick void 31852118Smckusick pmap_pinit(pmap) 31952118Smckusick register struct pmap *pmap; 32052118Smckusick { 32152118Smckusick register int i; 32252118Smckusick extern struct vmspace vmspace0; 32352118Smckusick 32452118Smckusick #ifdef DEBUG 32552118Smckusick if (pmapdebug & (PDB_FOLLOW|PDB_CREATE)) 32652118Smckusick printf("pmap_pinit(%x)\n", pmap); 32752118Smckusick #endif 32852118Smckusick simple_lock_init(&pmap->pm_lock); 32952118Smckusick pmap->pm_count = 1; 33052118Smckusick pmap->pm_flags = 0; 33152118Smckusick pmap->pm_hash = zero_pmap_hash; 33252118Smckusick for (i = 0; i < PMAP_HASH_UPAGES; i++) 33352118Smckusick pmap->pm_hash_ptes[i] = 33452118Smckusick ((u_int)zero_pmap_hash + (i << PGSHIFT)) | PG_V | PG_RO; 33552118Smckusick if (pmap == &vmspace0.vm_pmap) 33652118Smckusick pmap->pm_tlbpid = 1; /* preallocated in mach_init() */ 33752118Smckusick else 33852118Smckusick pmap->pm_tlbpid = -1; /* none allocated yet */ 33952118Smckusick } 34052118Smckusick 34152118Smckusick /* 34252118Smckusick * Retire the given physical map from service. 34352118Smckusick * Should only be called if the map contains 34452118Smckusick * no valid mappings. 
34552118Smckusick */ 34652118Smckusick void 34752118Smckusick pmap_destroy(pmap) 34852118Smckusick register pmap_t pmap; 34952118Smckusick { 35052118Smckusick int count; 35152118Smckusick 35252118Smckusick #ifdef DEBUG 35352118Smckusick if (pmapdebug & PDB_FOLLOW) 35452118Smckusick printf("pmap_destroy(%x)\n", pmap); 35552118Smckusick #endif 35652118Smckusick if (pmap == NULL) 35752118Smckusick return; 35852118Smckusick 35952118Smckusick printf("pmap_destroy(%x) XXX\n", pmap); /* XXX */ 36052118Smckusick simple_lock(&pmap->pm_lock); 36152118Smckusick count = --pmap->pm_count; 36252118Smckusick simple_unlock(&pmap->pm_lock); 36352118Smckusick if (count == 0) { 36452118Smckusick pmap_release(pmap); 36552118Smckusick free((caddr_t)pmap, M_VMPMAP); 36652118Smckusick } 36752118Smckusick } 36852118Smckusick 36952118Smckusick /* 37052118Smckusick * Release any resources held by the given physical map. 37152118Smckusick * Called when a pmap initialized by pmap_pinit is being released. 37252118Smckusick * Should only be called if the map contains no valid mappings. 
 */
void
pmap_release(pmap)
	register pmap_t pmap;
{
	register int id;
#ifdef DIAGNOSTIC
	register int i;
#endif

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_release(%x)\n", pmap);
#endif

	/*
	 * Free any privately allocated TLB hash table and revert to the
	 * shared read-only zero table.
	 */
	if (pmap->pm_hash && pmap->pm_hash != zero_pmap_hash) {
		kmem_free(kernel_map, (vm_offset_t)pmap->pm_hash,
			PMAP_HASH_SIZE);
		pmap->pm_hash = zero_pmap_hash;
	}
	/* No hardware TLB pid ever assigned: nothing more to give back. */
	if ((id = pmap->pm_tlbpid) < 0)
		return;
#ifdef DIAGNOSTIC
	if (!(whichpids[id >> 5] & (1 << (id & 0x1F))))
		panic("pmap_release: id free");
#endif
	/* Invalidate this pid's TLB entries, then mark the pid free. */
	MachTLBFlushPID(id);
	whichpids[id >> 5] &= ~(1 << (id & 0x1F));
	pmap->pm_flags &= ~PM_MODIFIED;
	pmap->pm_tlbpid = -1;
	if (pmap == cur_pmap)
		cur_pmap = (pmap_t)0;
#ifdef DIAGNOSTIC
	/* invalidate user PTE cache */
	for (i = 0; i < PMAP_HASH_UPAGES; i++)
		MachTLBWriteIndexed(i + UPAGES, MACH_RESERVED_ADDR, 0);
#endif
}

/*
 * Add a reference to the specified pmap.
 */
void
pmap_reference(pmap)
	pmap_t pmap;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_reference(%x)\n", pmap);
#endif
	if (pmap != NULL) {
		simple_lock(&pmap->pm_lock);
		pmap->pm_count++;
		simple_unlock(&pmap->pm_lock);
	}
}

/*
 * Remove the given range of addresses from the specified map.
 *
 * It is assumed that the start and end are properly
 * rounded to the page size.
 *
 * Three cases, in order:
 *   1. kernel pmap (pm_hash == 0): walk the kernel page table directly
 *      and flush each removed entry from the TLB;
 *   2. user pmap that is not the current one: invalidate entries in its
 *      software hash cache only, deferring hardware work via PM_MODIFIED;
 *   3. current user pmap: invalidate the hash entries and flush the
 *      hardware TLB as well.
 */
void
pmap_remove(pmap, sva, eva)
	register pmap_t pmap;
	vm_offset_t sva, eva;
{
	register vm_offset_t va;
	register pv_entry_t pv, npv;
	pmap_hash_t hp;
	unsigned entry;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
		printf("pmap_remove(%x, %x, %x)\n", pmap, sva, eva);
	remove_stats.calls++;
#endif
	if (pmap == NULL)
		return;

	/* anything in the cache? */
	/*
	 * A user pmap with no hardware pid or still sharing the zero
	 * hash table cannot have any mappings to remove.  (The kernel
	 * pmap has pm_hash == 0 and falls through to the next test.)
	 */
	if (pmap->pm_tlbpid < 0 || pmap->pm_hash == zero_pmap_hash)
		return;

	if (!pmap->pm_hash) {
		register pt_entry_t *pte;

		/* remove entries from kernel pmap */
		pte = kvtopte(sva);
		for (va = sva; va < eva; va += NBPG, pte++) {
			entry = pte->pt_entry;
			if (!(entry & PG_V))
				continue;
			if (entry & PG_WIRED)
				pmap->pm_stats.wired_count--;
			pmap->pm_stats.resident_count--;
			pmap_remove_pv(pmap, va, entry & PG_FRAME);
#ifdef ATTR
			pmap_attributes[atop(entry - KERNBASE)] = 0;
#endif
			pte->pt_entry = PG_NV;
			/*
			 * Flush the TLB for the given address.
			 */
			MachTLBFlushAddr(va);
#ifdef DEBUG
			remove_stats.flushes++;
#endif
		}
		return;
	}

	/*
	 * Hash entries are tagged with the TLB pid in the high bits of
	 * the VA, so fold the pid into both ends of the range before
	 * comparing against hp->high.
	 */
	va = sva | (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
	eva |= (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
	/*
	 * If we are not in the current address space, just flush the
	 * software cache and not the hardware.
	 */
	if (pmap != cur_pmap) {
		for (; va < eva; va += NBPG) {
			hp = &pmap->pm_hash[PMAP_HASH(va)];
			if (hp->high != va)
				continue;

			/* Invalidate the hash slot, then do bookkeeping. */
			hp->high = 0;
			entry = hp->low;
			if (entry & PG_WIRED)
				pmap->pm_stats.wired_count--;
			pmap->pm_stats.resident_count--;
			pmap_remove_pv(pmap, va & PG_FRAME, entry & PG_FRAME);
#ifdef ATTR
			pmap_attributes[atop(entry - KERNBASE)] = 0;
#endif
			/* Hardware flush deferred until pmap is switched in. */
			pmap->pm_flags |= PM_MODIFIED;
#ifdef DEBUG
			remove_stats.removes++;
#endif
		}
		return;
	}

	/* Current address space: also flush the hardware TLB per page. */
	for (; va < eva; va += NBPG) {
		hp = &pmap->pm_hash[PMAP_HASH(va)];
		if (hp->high != va)
			continue;

		hp->high = 0;
		entry = hp->low;
		if (entry & PG_WIRED)
			pmap->pm_stats.wired_count--;
		pmap->pm_stats.resident_count--;
		pmap_remove_pv(pmap, va & PG_FRAME, entry & PG_FRAME);
#ifdef ATTR
		pmap_attributes[atop(entry - KERNBASE)] = 0;
#endif
		/*
		 * Flush the TLB for the given address.
		 */
		MachTLBFlushAddr(va);
#ifdef DEBUG
		remove_stats.flushes++;
#endif
	}
}

/*
 * pmap_page_protect:
 *
 *	Lower the permission for all mappings to a given page.
 */
void
pmap_page_protect(pa, prot)
	vm_offset_t pa;
	vm_prot_t prot;
{
	register pv_entry_t pv;
	register vm_offset_t va;
	int s;

#ifdef DEBUG
	if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
	    prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE))
		printf("pmap_page_protect(%x, %x)\n", pa, prot);
#endif
	if (!IS_VM_PHYSADDR(pa))
		return;

	switch (prot) {
	case VM_PROT_ALL:
		/* Nothing to take away. */
		break;

	/* copy_on_write */
	case VM_PROT_READ:
	case VM_PROT_READ|VM_PROT_EXECUTE:
		pv = pa_to_pvh(pa);
		s = splimp();
		/*
		 * Loop over all current mappings setting/clearing as appropos.
		 */
		if (pv->pv_pmap != NULL) {
			for (; pv; pv = pv->pv_next) {
				extern vm_offset_t pager_sva, pager_eva;
				va = pv->pv_va;

				/*
				 * XXX don't write protect pager mappings
				 */
				if (va >= pager_sva && va < pager_eva)
					continue;
				pmap_protect(pv->pv_pmap, va, va + PAGE_SIZE,
					prot);
			}
		}
		splx(s);
		break;

	/* remove_all */
	default:
		/*
		 * pmap_remove() unlinks the pv entry, so the list head
		 * advances each iteration until it empties.
		 */
		pv = pa_to_pvh(pa);
		s = splimp();
		while (pv->pv_pmap != NULL) {
			pmap_remove(pv->pv_pmap, pv->pv_va,
				pv->pv_va + PAGE_SIZE);
		}
		splx(s);
	}
}

/*
 * Set the physical protection on the
 * specified range of this map as requested.
 *
 * Removing VM_PROT_READ removes the mapping entirely (via pmap_remove).
 * Otherwise the same three cases as pmap_remove apply: kernel page
 * table, non-current user pmap (software hash only), and current user
 * pmap (hash plus hardware TLB update).
 */
void
pmap_protect(pmap, sva, eva, prot)
	register pmap_t pmap;
	vm_offset_t sva, eva;
	vm_prot_t prot;
{
	register vm_offset_t va;
	pmap_hash_t hp;
	u_int p;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
		printf("pmap_protect(%x, %x, %x, %x)\n", pmap, sva, eva, prot);
#endif
	if (pmap == NULL)
		return;

	/* anything in the software cache? */
	if (pmap->pm_tlbpid < 0 || pmap->pm_hash == zero_pmap_hash)
		return;

	if (!(prot & VM_PROT_READ)) {
		pmap_remove(pmap, sva, eva);
		return;
	}

	if (!pmap->pm_hash) {
		register pt_entry_t *pte;

		/*
		 * Change entries in kernel pmap.
		 * This will trap if the page is writeable (in order to set
		 * the dirty bit) even if the dirty bit is already set. The
		 * optimization isn't worth the effort since this code isn't
		 * executed much. The common case is to make a user page
		 * read-only.
		 */
		p = (prot & VM_PROT_WRITE) ? PG_RW : PG_RO;
		pte = kvtopte(sva);
		for (va = sva; va < eva; va += NBPG, pte++) {
			if (!(pte->pt_entry & PG_V))
				continue;
			/* Clear dirty + RO bits, then apply new protection. */
			pte->pt_entry = (pte->pt_entry & ~(PG_M | PG_RO)) | p;
			/*
			 * Update the TLB if the given address is in the cache.
			 */
			MachTLBUpdate(va, pte->pt_entry);
		}
		return;
	}

	p = (prot & VM_PROT_WRITE) ? PG_RW : PG_RO;
	/* Fold the TLB pid into the VA range, as in pmap_remove(). */
	va = sva | (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
	eva |= (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
	/*
	 * If we are not in the current address space, just flush the
	 * software cache and not the hardware.
	 */
	if (pmap != cur_pmap) {
		for (; va < eva; va += NBPG) {
			hp = &pmap->pm_hash[PMAP_HASH(va)];
			if (hp->high != va)
				continue;

			hp->low = (hp->low & ~(PG_M | PG_RO)) | p;
			/* Hardware update deferred until context switch. */
			pmap->pm_flags |= PM_MODIFIED;
		}
		return;
	}

	for (; va < eva; va += NBPG) {
		hp = &pmap->pm_hash[PMAP_HASH(va)];
		if (hp->high != va)
			continue;

		hp->low = (hp->low & ~(PG_M | PG_RO)) | p;
		/*
		 * Update the TLB if the given address is in the cache.
		 */
		MachTLBUpdate(hp->high, hp->low);
	}
}

/*
 * Insert the given physical page (p) at
 * the specified virtual address (v) in the
 * target physical map with the protection requested.
 *
 * If specified, the page will be wired down, meaning
 * that the related pte can not be reclaimed.
 *
 * NB:  This is the only routine which MAY NOT lazy-evaluate
 * or lose information.  That is, this routine must actually
 * insert this page into the given map NOW.
 */
void
pmap_enter(pmap, va, pa, prot, wired)
	register pmap_t pmap;
	vm_offset_t va;
	register vm_offset_t pa;
	vm_prot_t prot;
	boolean_t wired;
{
	register pmap_hash_t hp;
	register u_int npte;		/* software PTE bits being built up */
	register int i;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
		printf("pmap_enter(%x, %x, %x, %x, %x)\n",
			pmap, va, pa, prot, wired);
#endif
#ifdef DIAGNOSTIC
	if (!pmap)
		panic("pmap_enter: pmap");
	if (pmap->pm_tlbpid < 0)
		panic("pmap_enter: tlbpid");
	if (pmap == kernel_pmap) {
		enter_stats.kernel++;
		/* kernel VAs must lie in the 0xC0000000-0xDFFFFFFF range */
		if ((va & 0xE0000000) != 0xC0000000)
			panic("pmap_enter: kva");
	} else {
		enter_stats.user++;
		/* user VAs must be below 0x80000000 */
		if (va & 0x80000000)
			panic("pmap_enter: uva");
	}
	/* every mapping entered must at least be readable */
	if (!(prot & VM_PROT_READ))
		panic("pmap_enter: prot");
#endif

	/*
	 * See if we need to create a new TLB cache.
	 * A pmap still sharing zero_pmap_hash gets a private
	 * hash table of its own here.
	 */
	if (pmap->pm_hash == zero_pmap_hash) {
		register vm_offset_t kva;
		register pt_entry_t *pte;

		kva = kmem_alloc(kernel_map, PMAP_HASH_SIZE);
		pmap->pm_hash = (pmap_hash_t)kva;

		/*
		 * Convert the kernel virtual address to a physical one
		 * and cache it in the pmap. Note: if the phyical address
		 * can change (due to memory compaction in kmem_alloc?),
		 * we will have to update things.
		 */
		pte = kvtopte(kva);
		for (i = 0; i < PMAP_HASH_UPAGES; i++) {
			/* clear PG_G so the hash pages are per-PID, not global */
			pmap->pm_hash_ptes[i] = pte->pt_entry & ~PG_G;
			pte++;
		}

		/*
		 * Map in new TLB cache if it is current.
		 */
		if (pmap == cur_pmap) {
#ifdef DIAGNOSTIC
			if (pmap->pm_tlbpid < 0)
				panic("pmap_enter: tlbpid");
#endif
			for (i = 0; i < PMAP_HASH_UPAGES; i++) {
				MachTLBWriteIndexed(i + UPAGES,
					(PMAP_HASH_UADDR + (i << PGSHIFT)) |
					(pmap->pm_tlbpid <<
					VMMACH_TLB_PID_SHIFT),
					pmap->pm_hash_ptes[i]);
			}
		}
#ifdef DIAGNOSTIC
		/*
		 * kmem_alloc() memory is expected to arrive zero filled.
		 * NOTE(review): only the first PAGE_SIZE bytes of the
		 * PMAP_HASH_SIZE allocation are verified here.
		 */
		for (i = 0; i < PAGE_SIZE; i += sizeof(int), kva += sizeof(int))
			if (*(int *)kva != 0)
				panic("pmap_enter: *kva != 0");
#endif
	}

	if (IS_VM_PHYSADDR(pa)) {
		register pv_entry_t pv, npv;
		int s;

		/*
		 * Choose the initial protection/modified bits from the
		 * requested protection and the page's clean state.
		 */
		if (!(prot & VM_PROT_WRITE))
			npte = PG_RO;
		else {
			register vm_page_t mem;

			mem = PHYS_TO_VM_PAGE(pa);
			if ((int)va < 0) {
				/*
				 * Don't bother to trap on kernel writes,
				 * just record page as dirty.
				 */
				npte = PG_M;
				mem->clean = FALSE;
			} else
#ifdef ATTR
			if ((pmap_attributes[atop(pa - KERNBASE)] &
			    PMAP_ATTR_MOD) || !mem->clean)
#else
			if (!mem->clean)
#endif
				npte = PG_M;
			else
				npte = 0;
		}

#ifdef DEBUG
		enter_stats.managed++;
#endif
		/*
		 * Enter the pmap and virtual address into the
		 * physical to virtual map table.
		 */
		pv = pa_to_pvh(pa);
		s = splimp();
#ifdef DEBUG
		if (pmapdebug & PDB_ENTER)
			printf("pmap_enter: pv %x: was %x/%x/%x\n",
				pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
#endif
		if (pv->pv_pmap == NULL) {
			/*
			 * No entries yet, use header as the first entry
			 */
#ifdef DEBUG
			enter_stats.firstpv++;
#endif
			pv->pv_va = va;
			pv->pv_pmap = pmap;
			pv->pv_next = NULL;
			pv->pv_flags = 0;
		} else {
			/*
			 * There is at least one other VA mapping this page.
			 * Place this entry after the header.
			 *
			 * Note: the entry may already be in the table if
			 * we are only changing the protection bits.
			 */
			for (npv = pv; npv; npv = npv->pv_next)
				if (pmap == npv->pv_pmap && va == npv->pv_va) {
#ifdef DIAGNOSTIC
					if (!pmap->pm_hash) {
						unsigned entry;

						entry = kvtopte(va)->pt_entry;
						if (!(entry & PG_V) ||
						    (entry & PG_FRAME) != pa)
							printf("found kva %x pa %x in pv_table but != %x\n",
								va, pa, entry);
					} else {
						hp = &pmap->pm_hash[PMAP_HASH(va)];
						if (hp->high != (va |
						    (pmap->pm_tlbpid <<
						    VMMACH_TLB_PID_SHIFT)) ||
						    (hp->low & PG_FRAME) != pa)
							printf("found va %x pa %x in pv_table but != %x %x\n",
								va, pa, hp->high, hp->low);
					}
#endif
					goto fnd;
				}
			/* can this cause us to recurse forever? */
			/*
			 * NOTE(review): malloc() is called with M_NOWAIT but
			 * the result is used without a NULL check.
			 */
			npv = (pv_entry_t)
				malloc(sizeof *npv, M_VMPVENT, M_NOWAIT);
			npv->pv_va = va;
			npv->pv_pmap = pmap;
			npv->pv_next = pv->pv_next;
			pv->pv_next = npv;
#ifdef DEBUG
			if (!npv->pv_next)
				enter_stats.secondpv++;
#endif
		fnd:
			;
		}
		splx(s);
	} else {
		/*
		 * Assumption: if it is not part of our managed memory
		 * then it must be device memory which may be volitile.
		 */
#ifdef DEBUG
		enter_stats.unmanaged++;
#endif
		printf("pmap_enter: UNMANAGED ADDRESS va %x pa %x\n",
			va, pa); /* XXX */
		npte = (prot & VM_PROT_WRITE) ? PG_M : PG_RO;
	}

	if (!pmap->pm_hash) {
		register pt_entry_t *pte;

		/* enter entries into kernel pmap */
		pte = kvtopte(va);
		npte |= pa | PG_V | PG_G;
		if (wired) {
			pmap->pm_stats.wired_count += pmaxpagesperpage;
			npte |= PG_WIRED;
		}
		/* one MACH page covers pmaxpagesperpage hardware pages */
		i = pmaxpagesperpage;
		do {
			if (!(pte->pt_entry & PG_V)) {
				pmap->pm_stats.resident_count++;
				MachTLBWriteRandom(va, npte);
			} else {
				/*
				 * Update the same virtual address entry.
				 */
				MachTLBUpdate(va, npte);
			}
			pte->pt_entry = npte;
			va += NBPG;
			npte += NBPG;
			pte++;
		} while (--i != 0);
		return;
	}

	/*
	 * Now validate mapping with desired protection/wiring.
	 * Assume uniform modified and referenced status for all
	 * PMAX pages in a MACH page.
	 */
	npte |= pa | PG_V;
	if (wired) {
		pmap->pm_stats.wired_count += pmaxpagesperpage;
		npte |= PG_WIRED;
	}
#ifdef DEBUG
	if (pmapdebug & PDB_ENTER)
		printf("pmap_enter: new pte value %x\n", npte);
#endif
	/* tag the VA with the TLB PID so hash entries are per-process */
	va |= (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
	i = pmaxpagesperpage;
	do {
		hp = &pmap->pm_hash[PMAP_HASH(va)];
		if (!hp->high) {
			/* empty slot: install the new translation */
			pmap->pm_stats.resident_count++;
			hp->high = va;
			hp->low = npte;
			MachTLBWriteRandom(va, npte);
		} else {
#ifdef DEBUG
			enter_stats.cachehit++;
#endif
			if (hp->high == va) {
				/*
				 * Update the same entry.
				 */
				hp->low = npte;
				MachTLBUpdate(va, npte);
			} else if (!(hp->low & PG_WIRED)) {
				/* evict the colliding, unwired entry */
				MachTLBFlushAddr(hp->high);
				pmap_remove_pv(pmap, hp->high & PG_FRAME,
					hp->low & PG_FRAME);
				hp->high = va;
				hp->low = npte;
				MachTLBWriteRandom(va, npte);
			} else {
				/*
				 * Don't replace wired entries, just update
				 * the hardware TLB.
				 * Bug: routines to flush the TLB won't know
				 * that the entry is in the hardware.
				 */
				printf("pmap_enter: wired va %x %x\n", va,
					hp->low); /* XXX */
				panic("pmap_enter: wired"); /* XXX */
				MachTLBWriteRandom(va, npte);
			}
		}
		va += NBPG;
		npte += NBPG;
	} while (--i != 0);
}

/*
 * Routine:	pmap_change_wiring
 * Function:	Change the wiring attribute for a map/virtual-address
 *			pair.
 * In/out conditions:
 *			The mapping must already exist in the pmap.
 */
void
pmap_change_wiring(pmap, va, wired)
	register pmap_t pmap;
	vm_offset_t va;
	boolean_t wired;
{
	register pmap_hash_t hp;
	u_int p;			/* PG_WIRED if wiring, else 0 */
	int i;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_change_wiring(%x, %x, %x)\n", pmap, va, wired);
#endif
	if (pmap == NULL)
		return;

	p = wired ? PG_WIRED : 0;

	/*
	 * Don't need to flush the TLB since PG_WIRED is only in software.
101052118Smckusick */ 101152118Smckusick if (!pmap->pm_hash) { 101252118Smckusick register pt_entry_t *pte; 101352118Smckusick 101452118Smckusick /* change entries in kernel pmap */ 101552118Smckusick pte = kvtopte(va); 101652118Smckusick i = pmaxpagesperpage; 101752118Smckusick if (!(pte->pt_entry & PG_WIRED) && p) 101852118Smckusick pmap->pm_stats.wired_count += i; 101952118Smckusick else if ((pte->pt_entry & PG_WIRED) && !p) 102052118Smckusick pmap->pm_stats.wired_count -= i; 102152118Smckusick do { 102252118Smckusick if (!(pte->pt_entry & PG_V)) 102352118Smckusick continue; 102452118Smckusick pte->pt_entry = (pte->pt_entry & ~PG_WIRED) | p; 102552118Smckusick pte++; 102652118Smckusick } while (--i != 0); 102752118Smckusick } else if (pmap->pm_tlbpid >= 0 && pmap->pm_hash != zero_pmap_hash) { 102852118Smckusick i = pmaxpagesperpage; 102952118Smckusick do { 103052118Smckusick hp = &pmap->pm_hash[PMAP_HASH(va)]; 103152118Smckusick if (!hp->high) 103252118Smckusick continue; 103352118Smckusick if (!(hp->low & PG_WIRED) && p) 103452118Smckusick pmap->pm_stats.wired_count++; 103552118Smckusick else if ((hp->low & PG_WIRED) && !p) 103652118Smckusick pmap->pm_stats.wired_count--; 103752118Smckusick hp->low = (hp->low & ~PG_WIRED) | p; 103852118Smckusick va += NBPG; 103952118Smckusick } while (--i != 0); 104052118Smckusick } 104152118Smckusick } 104252118Smckusick 104352118Smckusick /* 104452118Smckusick * Routine: pmap_extract 104552118Smckusick * Function: 104652118Smckusick * Extract the physical page address associated 104752118Smckusick * with the given map/virtual_address pair. 
 */
vm_offset_t
pmap_extract(pmap, va)
	register pmap_t pmap;
	vm_offset_t va;
{
	register vm_offset_t pa;
	register pmap_hash_t hp;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_extract(%x, %x) -> ", pmap, va);
#endif

	if (!pmap->pm_hash)
		/* kernel pmap: translations live in the linear PTE array */
		pa = kvtopte(va)->pt_entry & PG_FRAME;
	else if (pmap->pm_tlbpid >= 0) {
		/*
		 * NOTE(review): the hash slot's high word is not compared
		 * against this va/tlbpid, so a colliding entry would yield
		 * the wrong frame; also the returned value is the page
		 * frame only (no page offset added).  Confirm callers
		 * expect this.
		 */
		hp = &pmap->pm_hash[PMAP_HASH(va)];
		if (hp->high)
			pa = hp->low & PG_FRAME;
		else
			pa = 0;
	} else
		pa = 0;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("%x\n", pa);
#endif
	return(pa);
}

/*
 * Copy the range specified by src_addr/len
 * from the source map to the range dst_addr/len
 * in the destination map.
 *
 * This routine is only advisory and need not do anything.
 */
void pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
	pmap_t dst_pmap;
	pmap_t src_pmap;
	vm_offset_t dst_addr;
	vm_size_t len;
	vm_offset_t src_addr;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_copy(%x, %x, %x, %x, %x)\n",
			dst_pmap, src_pmap, dst_addr, len, src_addr);
#endif
}

/*
 * Require that all active physical maps contain no
 * incorrect entries NOW.  [This update includes
 * forcing updates of any address map caching.]
 *
 * Generally used to insure that a thread about
 * to run will see a semantically correct world.
 */
void pmap_update()
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_update()\n");
#endif
}

/*
 * Routine:	pmap_collect
 * Function:
 *	Garbage collects the physical map system for
 *	pages which are no longer used.
 *	Success need not be guaranteed -- that is, there
 *	may well be pages which are not referenced, but
 *	others may be collected.
 * Usage:
 *	Called by the pageout daemon when pages are scarce.
112952118Smckusick */ 113052118Smckusick void 113152118Smckusick pmap_collect(pmap) 113252118Smckusick pmap_t pmap; 113352118Smckusick { 113452118Smckusick 113552118Smckusick #ifdef DEBUG 113652118Smckusick if (pmapdebug & PDB_FOLLOW) 113752118Smckusick printf("pmap_collect(%x)\n", pmap); 113852118Smckusick #endif 113952118Smckusick } 114052118Smckusick 114152118Smckusick /* 114252118Smckusick * pmap_zero_page zeros the specified (machine independent) 114352118Smckusick * page. 114452118Smckusick */ 114552118Smckusick void 114652118Smckusick pmap_zero_page(phys) 114752118Smckusick register vm_offset_t phys; 114852118Smckusick { 114952118Smckusick register vm_offset_t end; 115052118Smckusick 115152118Smckusick #ifdef DEBUG 115252118Smckusick if (pmapdebug & PDB_FOLLOW) 115352118Smckusick printf("pmap_zero_page(%x)\n", phys); 115452118Smckusick #endif 115552118Smckusick end = phys + PAGE_SIZE; 115652118Smckusick do { 115752118Smckusick ((unsigned *)phys)[0] = 0; 115852118Smckusick ((unsigned *)phys)[1] = 0; 115952118Smckusick ((unsigned *)phys)[2] = 0; 116052118Smckusick ((unsigned *)phys)[3] = 0; 116152118Smckusick phys += 4 * sizeof(unsigned); 116252118Smckusick } while (phys != end); 116352118Smckusick } 116452118Smckusick 116552118Smckusick /* 116652118Smckusick * pmap_copy_page copies the specified (machine independent) 116752118Smckusick * page. 
116852118Smckusick */ 116952118Smckusick void 117052118Smckusick pmap_copy_page(src, dst) 117152118Smckusick register vm_offset_t src, dst; 117252118Smckusick { 117352118Smckusick register vm_offset_t end; 117452118Smckusick register unsigned tmp0, tmp1, tmp2, tmp3; 117552118Smckusick 117652118Smckusick #ifdef DEBUG 117752118Smckusick if (pmapdebug & PDB_FOLLOW) 117852118Smckusick printf("pmap_copy_page(%x, %x)\n", src, dst); 117952118Smckusick #endif 118052118Smckusick end = src + PAGE_SIZE; 118152118Smckusick do { 118252118Smckusick tmp0 = ((unsigned *)src)[0]; 118352118Smckusick tmp1 = ((unsigned *)src)[1]; 118452118Smckusick tmp2 = ((unsigned *)src)[2]; 118552118Smckusick tmp3 = ((unsigned *)src)[3]; 118652118Smckusick ((unsigned *)dst)[0] = tmp0; 118752118Smckusick ((unsigned *)dst)[1] = tmp1; 118852118Smckusick ((unsigned *)dst)[2] = tmp2; 118952118Smckusick ((unsigned *)dst)[3] = tmp3; 119052118Smckusick src += 4 * sizeof(unsigned); 119152118Smckusick dst += 4 * sizeof(unsigned); 119252118Smckusick } while (src != end); 119352118Smckusick } 119452118Smckusick 119552118Smckusick /* 119652118Smckusick * Routine: pmap_pageable 119752118Smckusick * Function: 119852118Smckusick * Make the specified pages (by pmap, offset) 119952118Smckusick * pageable (or not) as requested. 120052118Smckusick * 120152118Smckusick * A page which is not pageable may not take 120252118Smckusick * a fault; therefore, its page table entry 120352118Smckusick * must remain valid for the duration. 120452118Smckusick * 120552118Smckusick * This routine is merely advisory; pmap_enter 120652118Smckusick * will specify that these pages are to be wired 120752118Smckusick * down (or not) as appropriate. 
120852118Smckusick */ 120952118Smckusick void 121052118Smckusick pmap_pageable(pmap, sva, eva, pageable) 121152118Smckusick pmap_t pmap; 121252118Smckusick vm_offset_t sva, eva; 121352118Smckusick boolean_t pageable; 121452118Smckusick { 121552118Smckusick 121652118Smckusick #ifdef DEBUG 121752118Smckusick if (pmapdebug & PDB_FOLLOW) 121852118Smckusick printf("pmap_pageable(%x, %x, %x, %x)\n", 121952118Smckusick pmap, sva, eva, pageable); 122052118Smckusick #endif 122152118Smckusick } 122252118Smckusick 122352118Smckusick /* 122452118Smckusick * Clear the modify bits on the specified physical page. 122552118Smckusick */ 122652118Smckusick void 122752118Smckusick pmap_clear_modify(pa) 122852118Smckusick vm_offset_t pa; 122952118Smckusick { 123052118Smckusick pmap_hash_t hp; 123152118Smckusick 123252118Smckusick #ifdef DEBUG 123352118Smckusick if (pmapdebug & PDB_FOLLOW) 123452118Smckusick printf("pmap_clear_modify(%x)\n", pa); 123552118Smckusick #endif 123652118Smckusick #ifdef ATTR 123752118Smckusick pmap_attributes[atop(pa - KERNBASE)] &= ~PMAP_ATTR_MOD; 123852118Smckusick #endif 123952118Smckusick } 124052118Smckusick 124152118Smckusick /* 124252118Smckusick * pmap_clear_reference: 124352118Smckusick * 124452118Smckusick * Clear the reference bit on the specified physical page. 
124552118Smckusick */ 124652118Smckusick void 124752118Smckusick pmap_clear_reference(pa) 124852118Smckusick vm_offset_t pa; 124952118Smckusick { 125052118Smckusick 125152118Smckusick #ifdef DEBUG 125252118Smckusick if (pmapdebug & PDB_FOLLOW) 125352118Smckusick printf("pmap_clear_reference(%x)\n", pa); 125452118Smckusick #endif 125552118Smckusick #ifdef ATTR 125652118Smckusick pmap_attributes[atop(pa - KERNBASE)] &= ~PMAP_ATTR_REF; 125752118Smckusick #endif 125852118Smckusick } 125952118Smckusick 126052118Smckusick /* 126152118Smckusick * pmap_is_referenced: 126252118Smckusick * 126352118Smckusick * Return whether or not the specified physical page is referenced 126452118Smckusick * by any physical maps. 126552118Smckusick */ 126652118Smckusick boolean_t 126752118Smckusick pmap_is_referenced(pa) 126852118Smckusick vm_offset_t pa; 126952118Smckusick { 127052118Smckusick #ifdef ATTR 127152118Smckusick return(pmap_attributes[atop(pa - KERNBASE)] & PMAP_ATTR_REF); 127252118Smckusick #else 127352118Smckusick return(FALSE); 127452118Smckusick #endif 127552118Smckusick } 127652118Smckusick 127752118Smckusick /* 127852118Smckusick * pmap_is_modified: 127952118Smckusick * 128052118Smckusick * Return whether or not the specified physical page is modified 128152118Smckusick * by any physical maps. 
128252118Smckusick */ 128352118Smckusick boolean_t 128452118Smckusick pmap_is_modified(pa) 128552118Smckusick vm_offset_t pa; 128652118Smckusick { 128752118Smckusick #ifdef ATTR 128852118Smckusick return(pmap_attributes[atop(pa - KERNBASE)] & PMAP_ATTR_MOD); 128952118Smckusick #else 129052118Smckusick return(FALSE); 129152118Smckusick #endif 129252118Smckusick } 129352118Smckusick 129452118Smckusick vm_offset_t 129552118Smckusick pmap_phys_address(ppn) 129652118Smckusick int ppn; 129752118Smckusick { 129852118Smckusick 129952118Smckusick #ifdef DEBUG 130052118Smckusick if (pmapdebug & PDB_FOLLOW) 130152118Smckusick printf("pmap_phys_address(%x)\n", ppn); 130252118Smckusick #endif 130352118Smckusick panic("pmap_phys_address"); /* XXX */ 130452118Smckusick return(pmax_ptob(ppn)); 130552118Smckusick } 130652118Smckusick 130752118Smckusick /* 130852118Smckusick * Miscellaneous support routines 130952118Smckusick */ 131052118Smckusick 131152118Smckusick /* 131252118Smckusick * Allocate a hardware PID and return it. 131352118Smckusick * Also, change the hardwired TLB entry to point to the current TLB cache. 131452118Smckusick * This is called by swtch(). 
131552118Smckusick */ 131652118Smckusick int 131752118Smckusick pmap_alloc_tlbpid(p) 131852118Smckusick register struct proc *p; 131952118Smckusick { 132052118Smckusick register pmap_t pmap; 132152118Smckusick register u_int i; 132252118Smckusick register int id; 132352118Smckusick 132452118Smckusick pmap = &p->p_vmspace->vm_pmap; 132552118Smckusick if ((id = pmap->pm_tlbpid) >= 0) { 132652118Smckusick if (pmap->pm_flags & PM_MODIFIED) { 132752118Smckusick pmap->pm_flags &= ~PM_MODIFIED; 132852118Smckusick MachTLBFlushPID(id); 132952118Smckusick } 133052118Smckusick goto done; 133152118Smckusick } 133252118Smckusick 133352118Smckusick if ((i = whichpids[0]) != 0xFFFFFFFF) 133452118Smckusick id = 0; 133552118Smckusick else if ((i = whichpids[1]) != 0xFFFFFFFF) 133652118Smckusick id = 32; 133752118Smckusick else { 133852118Smckusick register struct proc *q; 133952118Smckusick register pmap_t q_pmap; 134052118Smckusick 134152118Smckusick /* 134252118Smckusick * Have to find a tlbpid to recycle. 134352118Smckusick * There is probably a better way to do this. 134452118Smckusick */ 134552118Smckusick for (q = allproc; q != NULL; q = q->p_nxt) { 134652118Smckusick q_pmap = &q->p_vmspace->vm_pmap; 134752118Smckusick if ((id = q_pmap->pm_tlbpid) < 0) 134852118Smckusick continue; 134952118Smckusick if (q->p_stat != SRUN) 135052118Smckusick goto fnd; 135152118Smckusick } 135252118Smckusick if (id < 0) 135352118Smckusick panic("TLBPidAlloc"); 135452118Smckusick fnd: 135552118Smckusick printf("pmap_alloc_tlbpid: recycle pid %d (%s) tlbpid %d\n", 135652118Smckusick q->p_pid, q->p_comm, id); /* XXX */ 135752118Smckusick /* 135852118Smckusick * Even though the virtual to physical mapping hasn't changed, 135952118Smckusick * we need to clear the PID tag in the high entry of the cache. 
136052118Smckusick */ 136152118Smckusick if (q_pmap->pm_hash != zero_pmap_hash) { 136252118Smckusick register pmap_hash_t hp; 136352118Smckusick 136452118Smckusick hp = q_pmap->pm_hash; 136552118Smckusick for (i = 0; i < PMAP_HASH_NUM_ENTRIES; i++, hp++) { 136652118Smckusick if (!hp->high) 136752118Smckusick continue; 136852118Smckusick 136952118Smckusick if (hp->low & PG_WIRED) { 137052118Smckusick printf("Clearing wired user entry! h %x l %x\n", hp->high, hp->low); 137152118Smckusick panic("pmap_alloc_tlbpid: wired"); 137252118Smckusick } 137352118Smckusick pmap_remove_pv(pmap, hp->high & PG_FRAME, 137452118Smckusick hp->low & PG_FRAME); 137552118Smckusick hp->high = 0; 137652118Smckusick q_pmap->pm_stats.resident_count--; 137752118Smckusick } 137852118Smckusick } 137952118Smckusick q_pmap->pm_tlbpid = -1; 138052118Smckusick MachTLBFlushPID(id); 138152118Smckusick #ifdef DEBUG 138252118Smckusick remove_stats.pidflushes++; 138352118Smckusick #endif 138452118Smckusick pmap->pm_tlbpid = id; 138552118Smckusick goto done; 138652118Smckusick } 138752118Smckusick while (i & 1) { 138852118Smckusick i >>= 1; 138952118Smckusick id++; 139052118Smckusick } 139152118Smckusick whichpids[id >> 5] |= 1 << (id & 0x1F); 139252118Smckusick pmap->pm_tlbpid = id; 139352118Smckusick done: 139452118Smckusick /* 139552118Smckusick * Map in new TLB cache. 139652118Smckusick */ 139752118Smckusick if (pmap == cur_pmap) 139852118Smckusick return (id); 139952118Smckusick cur_pmap = pmap; 140052118Smckusick for (i = 0; i < PMAP_HASH_UPAGES; i++) { 140152118Smckusick MachTLBWriteIndexed(i + UPAGES, 140252118Smckusick (PMAP_HASH_UADDR + (i << PGSHIFT)) | 140352118Smckusick (id << VMMACH_TLB_PID_SHIFT), 140452118Smckusick pmap->pm_hash_ptes[i]); 140552118Smckusick } 140652118Smckusick return (id); 140752118Smckusick } 140852118Smckusick 140952118Smckusick /* 141052118Smckusick * Remove a physical to virtual address translation. 
 */
void
pmap_remove_pv(pmap, va, pa)
	pmap_t pmap;
	vm_offset_t va, pa;
{
	register pv_entry_t pv, npv;
	int s;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_remove_pv(%x, %x, %x)\n", pmap, va, pa);
#endif
	/*
	 * Remove page from the PV table (raise IPL since we
	 * may be called at interrupt time).
	 */
	if (!IS_VM_PHYSADDR(pa))
		return;
	pv = pa_to_pvh(pa);
	s = splimp();
	/*
	 * If it is the first entry on the list, it is actually
	 * in the header and we must copy the following entry up
	 * to the header.  Otherwise we must search the list for
	 * the entry.  In either case we free the now unused entry.
	 */
	if (pmap == pv->pv_pmap && va == pv->pv_va) {
		npv = pv->pv_next;
		if (npv) {
			/* promote the second entry into the header */
			*pv = *npv;
			free((caddr_t)npv, M_VMPVENT);
		} else
			pv->pv_pmap = NULL;
#ifdef DEBUG
		remove_stats.pvfirst++;
#endif
	} else {
		for (npv = pv->pv_next; npv; pv = npv, npv = npv->pv_next) {
#ifdef DEBUG
			remove_stats.pvsearch++;
#endif
			if (pmap == npv->pv_pmap && va == npv->pv_va)
				goto fnd;
		}
#ifdef DIAGNOSTIC
		printf("pmap_remove_pv(%x, %x, %x) not found\n", pmap, va, pa);
		panic("pmap_remove_pv");
#endif
		/*
		 * NOTE(review): without DIAGNOSTIC, a missing entry falls
		 * through to `fnd' with npv == NULL and dereferences it.
		 */
	fnd:
		/* unlink npv from the chain and free it */
		pv->pv_next = npv->pv_next;
		free((caddr_t)npv, M_VMPVENT);
	}
	splx(s);
}

#ifdef DEBUG
/*
 * Debug aid: dump the non-empty slots of a pmap's TLB hash cache.
 */
pmap_print(pmap)
	pmap_t pmap;
{
	register pmap_hash_t hp;
	register int i;

	printf("\tpmap_print(%x)\n", pmap);

	if (pmap->pm_hash == zero_pmap_hash) {
		printf("pm_hash == zero\n");
		return;
	}
	if (pmap->pm_hash == (pmap_hash_t)0) {
		/* kernel pmap uses the linear PTE array, not a hash */
		printf("pm_hash == kernel\n");
		return;
	}
	hp = pmap->pm_hash;
	for (i = 0; i < PMAP_HASH_NUM_ENTRIES; i++, hp++) {
		if (!hp->high)
			continue;
		printf("%d: hi %x low %x\n", i, hp->high, hp->low);
	}
}
#endif