/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and Ralph Campbell.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)pmap.c	8.4 (Berkeley) 01/26/94
 */

/*
 *	Manages physical address maps.
 *
 *	In addition to hardware address maps, this
 *	module is called upon to provide software-use-only
 *	maps which may or may not be stored in the same
 *	form as hardware maps.  These pseudo-maps are
 *	used to store intermediate results from copy
 *	operations to and from address spaces.
 *
 *	Since the information managed by this module is
 *	also stored by the logical address mapping module,
 *	this module may throw away valid virtual-to-physical
 *	mappings at almost any time.  However, invalidations
 *	of virtual-to-physical mappings must be done as
 *	requested.
 *
 *	In order to cope with hardware architectures which
 *	make virtual-to-physical map invalidates expensive,
 *	this module may delay invalidate or reduced protection
 *	operations until such time as they are actually
 *	necessary.  This module is given full information as
 *	to which processors are currently using which maps,
 *	and to when physical maps must be made correct.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/user.h>
#include <sys/buf.h>
#ifdef SYSVSHM
#include <sys/shm.h>
#endif

#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <machine/machConst.h>
#include <machine/pte.h>

extern vm_page_t vm_page_alloc1 __P((void));
extern void vm_page_free1 __P((vm_page_t));

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_table.
 * XXX really should do this as a part of the higher level code.
 */
typedef struct pv_entry {
	struct pv_entry	*pv_next;	/* next pv_entry */
	struct pmap	*pv_pmap;	/* pmap where mapping lies */
	vm_offset_t	pv_va;		/* virtual address for mapping */
} *pv_entry_t;

pv_entry_t	pv_table;	/* array of entries, one per page */
extern void	pmap_remove_pv();

#define pa_index(pa)		atop((pa) - first_phys_addr)
#define pa_to_pvh(pa)		(&pv_table[pa_index(pa)])

#ifdef DEBUG
struct {
	int kernel;	/* entering kernel mapping */
	int user;	/* entering user mapping */
	int ptpneeded;	/* needed to allocate a PT page */
	int pwchange;	/* no mapping change, just wiring or protection */
	int wchange;	/* no mapping change, just wiring */
	int mchange;	/* was mapped but mapping to different page */
	int managed;	/* a managed page */
	int firstpv;	/* first mapping for this PA */
	int secondpv;	/* second mapping for this PA */
	int ci;		/* cache inhibited */
	int unmanaged;	/* not a managed page */
	int flushes;	/* cache flushes */
	int cachehit;	/* new entry forced valid entry out */
} enter_stats;
struct {
	int calls;
	int removes;
	int flushes;
	int pidflushes;	/* HW pid stolen */
	int pvfirst;
	int pvsearch;
} remove_stats;

int pmapdebug;
#define PDB_FOLLOW	0x0001
#define PDB_INIT	0x0002
#define PDB_ENTER	0x0004
#define PDB_REMOVE	0x0008
#define PDB_CREATE	0x0010
#define PDB_PTPAGE	0x0020
#define PDB_PVENTRY	0x0040
#define PDB_BITS	0x0080
#define PDB_COLLECT	0x0100
#define PDB_PROTECT	0x0200
#define PDB_TLBPID	0x0400
#define PDB_PARANOIA	0x2000
#define PDB_WIRING	0x4000
#define PDB_PVDUMP	0x8000

#endif /* DEBUG */

struct pmap	kernel_pmap_store;

vm_offset_t	avail_start;	/* PA of first available physical page */
vm_offset_t	avail_end;	/* PA of last available physical page */
vm_size_t	mem_size;	/* memory size in bytes */
vm_offset_t	virtual_avail;	/* VA of first avail page (after kernel bss)*/
vm_offset_t	virtual_end;	/* VA of last avail page (end of kernel AS) */
int		pmaxpagesperpage;	/* PAGE_SIZE / NBPG */
#ifdef ATTR
char		*pmap_attributes;	/* reference and modify bits */
#endif
struct segtab	*free_segtab;		/* free list kept locally */
u_int		tlbpid_gen = 1;		/* TLB PID generation count */
int		tlbpid_cnt = 2;		/* next available TLB PID */
pt_entry_t	*Sysmap;		/* kernel pte table */
u_int		Sysmapsize;		/* number of pte's in Sysmap */
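/*
 * Illustrative sketch (not in the original source): walking all current
 * mappings of a managed physical page through its pv_table chain, as
 * pmap_page_protect() does below.  'pa' is a hypothetical managed
 * physical address; note the list header doubles as the first entry,
 * with pv_pmap == NULL meaning the list is empty.
 */
#ifdef notdef
	pv_entry_t pv;

	pv = pa_to_pvh(pa);
	if (pv->pv_pmap != NULL)
		for (; pv != NULL; pv = pv->pv_next)
			/* pv->pv_pmap maps pa at pv->pv_va */;
#endif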
/*
 *	Bootstrap the system enough to run with virtual memory.
 *	firstaddr is the first unused kseg0 address (not page aligned).
 */
void
pmap_bootstrap(firstaddr)
	vm_offset_t firstaddr;
{
	register int i;
	vm_offset_t start = firstaddr;
	extern int maxmem, physmem;

#define	valloc(name, type, num) \
	    (name) = (type *)firstaddr; firstaddr = (vm_offset_t)((name)+(num))
	/*
	 * Allocate a PTE table for the kernel.
	 * The '1024' comes from PAGER_MAP_SIZE in vm_pager_init().
	 * This should be kept in sync.
	 * We also reserve space for kmem_alloc_pageable() for vm_fork().
	 */
	Sysmapsize = (VM_KMEM_SIZE + VM_MBUF_SIZE + VM_PHYS_SIZE +
		nbuf * MAXBSIZE + 16 * NCARGS) / NBPG + 1024 + 256;
#ifdef SYSVSHM
	Sysmapsize += shminfo.shmall;
#endif
	valloc(Sysmap, pt_entry_t, Sysmapsize);
#ifdef ATTR
	valloc(pmap_attributes, char, physmem);
#endif
	/*
	 * Allocate memory for pv_table.
	 * This will allocate more entries than we really need.
	 * We could do this in pmap_init when we know the actual
	 * phys_start and phys_end but it's better to use kseg0 addresses
	 * rather than kernel virtual addresses mapped through the TLB.
	 */
	i = maxmem - pmax_btop(MACH_CACHED_TO_PHYS(firstaddr));
	valloc(pv_table, struct pv_entry, i);

	/*
	 * Clear allocated memory.
	 */
	firstaddr = pmax_round_page(firstaddr);
	bzero((caddr_t)start, firstaddr - start);

	avail_start = MACH_CACHED_TO_PHYS(firstaddr);
	avail_end = pmax_ptob(maxmem);
	mem_size = avail_end - avail_start;

	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MIN_KERNEL_ADDRESS + Sysmapsize * NBPG;
	/* XXX need to decide how to set cnt.v_page_size */
	pmaxpagesperpage = 1;

	simple_lock_init(&kernel_pmap_store.pm_lock);
	kernel_pmap_store.pm_count = 1;
}
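/*
 * Illustrative sketch (not in the original source): the valloc() macro
 * above carves successive arrays out of the unused kseg0 region by
 * advancing firstaddr past each one.  Expanded by hand for a
 * hypothetical array 'tab' of 'n' struct segtab entries, it is
 * equivalent to:
 */
#ifdef notdef
	tab = (struct segtab *)firstaddr;	/* (name) = (type *)firstaddr */
	firstaddr = (vm_offset_t)(tab + n);	/* bump past the new array */
#endif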
/*
 * Bootstrap memory allocator. This function allows for early dynamic
 * memory allocation until the virtual memory system has been bootstrapped.
 * After that point, either kmem_alloc or malloc should be used. This
 * function works by stealing pages from the (to be) managed page pool,
 * stealing virtual address space, then mapping the pages and zeroing them.
 *
 * It should be used from pmap_bootstrap till vm_page_startup, afterwards
 * it cannot be used, and will generate a panic if tried. Note that this
 * memory will never be freed, and in essence it is wired down.
 */
void *
pmap_bootstrap_alloc(size)
	int size;
{
	vm_offset_t val;
	extern boolean_t vm_page_startup_initialized;

	if (vm_page_startup_initialized)
		panic("pmap_bootstrap_alloc: called after startup initialized");

	val = MACH_PHYS_TO_CACHED(avail_start);
	size = round_page(size);
	avail_start += size;

	blkclr((caddr_t)val, size);
	return ((void *)val);
}

/*
 *	Initialize the pmap module.
 *	Called by vm_init, to initialize any structures that the pmap
 *	system needs to map virtual memory.
 */
void
pmap_init(phys_start, phys_end)
	vm_offset_t phys_start, phys_end;
{

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_INIT))
		printf("pmap_init(%x, %x)\n", phys_start, phys_end);
#endif
}

/*
 *	Create and return a physical map.
 *
 *	If the size specified for the map
 *	is zero, the map is an actual physical
 *	map, and may be referenced by the
 *	hardware.
 *
 *	If the size specified is non-zero,
 *	the map will be used in software only, and
 *	is bounded by that size.
 */
pmap_t
pmap_create(size)
	vm_size_t size;
{
	register pmap_t pmap;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
		printf("pmap_create(%x)\n", size);
#endif
	/*
	 * A software-use-only map doesn't need a pmap.
	 */
	if (size)
		return (NULL);

	/* XXX: is it ok to wait here? */
	pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
#ifdef notifwewait
	if (pmap == NULL)
		panic("pmap_create: cannot allocate a pmap");
#endif
	bzero(pmap, sizeof(*pmap));
	pmap_pinit(pmap);
	return (pmap);
}
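/*
 * Illustrative sketch (not in the original source): how a caller is
 * expected to use pmap_create().  A zero size requests a real,
 * hardware-visible map; a non-zero size requests a software-only map,
 * which this port does not back with a pmap.  'hwmap' and 'swsize'
 * are hypothetical.
 */
#ifdef notdef
	pmap_t hwmap;

	hwmap = pmap_create((vm_size_t)0);	/* hardware map */
	if (pmap_create(swsize) == NULL)	/* software-only: no pmap */
		;
#endif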
/*
 *	Initialize a preallocated and zeroed pmap structure,
 *	such as one in a vmspace structure.
 */
void
pmap_pinit(pmap)
	register struct pmap *pmap;
{
	register int i;
	int s;
	extern struct vmspace vmspace0;
	extern struct user *proc0paddr;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
		printf("pmap_pinit(%x)\n", pmap);
#endif
	simple_lock_init(&pmap->pm_lock);
	pmap->pm_count = 1;
	if (free_segtab) {
		s = splimp();
		pmap->pm_segtab = free_segtab;
		free_segtab = *(struct segtab **)free_segtab;
		pmap->pm_segtab->seg_tab[0] = NULL;
		splx(s);
	} else {
		register struct segtab *stp;
		vm_page_t mem;

		mem = vm_page_alloc1();
		pmap_zero_page(VM_PAGE_TO_PHYS(mem));
		pmap->pm_segtab = stp = (struct segtab *)
			MACH_PHYS_TO_CACHED(VM_PAGE_TO_PHYS(mem));
		i = pmaxpagesperpage * (NBPG / sizeof(struct segtab));
		s = splimp();
		while (--i != 0) {
			stp++;
			*(struct segtab **)stp = free_segtab;
			free_segtab = stp;
		}
		splx(s);
	}
#ifdef DIAGNOSTIC
	for (i = 0; i < PMAP_SEGTABSIZE; i++)
		if (pmap->pm_segtab->seg_tab[i] != 0)
			panic("pmap_pinit: pm_segtab != 0");
#endif
	if (pmap == &vmspace0.vm_pmap) {
		/*
		 * The initial process has already been allocated a TLBPID
		 * in mach_init().
		 */
		pmap->pm_tlbpid = 1;
		pmap->pm_tlbgen = tlbpid_gen;
		proc0paddr->u_pcb.pcb_segtab = (void *)pmap->pm_segtab;
	} else {
		pmap->pm_tlbpid = 0;
		pmap->pm_tlbgen = 0;
	}
}

/*
 *	Retire the given physical map from service.
 *	Should only be called if the map contains
 *	no valid mappings.
 */
void
pmap_destroy(pmap)
	register pmap_t pmap;
{
	int count;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
		printf("pmap_destroy(%x)\n", pmap);
#endif
	if (pmap == NULL)
		return;

	simple_lock(&pmap->pm_lock);
	count = --pmap->pm_count;
	simple_unlock(&pmap->pm_lock);
	if (count == 0) {
		pmap_release(pmap);
		free((caddr_t)pmap, M_VMPMAP);
	}
}
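/*
 * Illustrative sketch (not in the original source): pmap_pinit() above
 * and pmap_release() below keep free segment tables on an intrusive
 * freelist, reusing the first word of each free struct segtab as the
 * link.  Written out for a hypothetical 'stp':
 */
#ifdef notdef
	/* push a free segtab: */
	*(struct segtab **)stp = free_segtab;
	free_segtab = stp;
	/* pop one for reuse: */
	stp = free_segtab;
	free_segtab = *(struct segtab **)free_segtab;
	stp->seg_tab[0] = NULL;		/* scrub the word used as the link */
#endif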
/*
 *	Release any resources held by the given physical map.
 *	Called when a pmap initialized by pmap_pinit is being released.
 *	Should only be called if the map contains no valid mappings.
 */
void
pmap_release(pmap)
	register pmap_t pmap;
{

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
		printf("pmap_release(%x)\n", pmap);
#endif

	if (pmap->pm_segtab) {
		register pt_entry_t *pte;
		register int i;
		int s;
#ifdef DIAGNOSTIC
		register int j;
#endif

		for (i = 0; i < PMAP_SEGTABSIZE; i++) {
			/* get pointer to segment map */
			pte = pmap->pm_segtab->seg_tab[i];
			if (!pte)
				continue;
			vm_page_free1(
				PHYS_TO_VM_PAGE(MACH_CACHED_TO_PHYS(pte)));
#ifdef DIAGNOSTIC
			for (j = 0; j < NPTEPG; j++) {
				if (pte[j].pt_entry)
					panic("pmap_release: segmap not empty");
			}
#endif
			pmap->pm_segtab->seg_tab[i] = NULL;
		}
		s = splimp();
		*(struct segtab **)pmap->pm_segtab = free_segtab;
		free_segtab = pmap->pm_segtab;
		splx(s);
		pmap->pm_segtab = NULL;
	}
}

/*
 *	Add a reference to the specified pmap.
 */
void
pmap_reference(pmap)
	pmap_t pmap;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_reference(%x)\n", pmap);
#endif
	if (pmap != NULL) {
		simple_lock(&pmap->pm_lock);
		pmap->pm_count++;
		simple_unlock(&pmap->pm_lock);
	}
}
/*
 *	Remove the given range of addresses from the specified map.
 *
 *	It is assumed that the start and end are properly
 *	rounded to the page size.
 */
void
pmap_remove(pmap, sva, eva)
	register pmap_t pmap;
	vm_offset_t sva, eva;
{
	register vm_offset_t nssva;
	register pt_entry_t *pte;
	unsigned entry;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
		printf("pmap_remove(%x, %x, %x)\n", pmap, sva, eva);
	remove_stats.calls++;
#endif
	if (pmap == NULL)
		return;

	if (!pmap->pm_segtab) {
		register pt_entry_t *pte;

		/* remove entries from kernel pmap */
#ifdef DIAGNOSTIC
		if (sva < VM_MIN_KERNEL_ADDRESS || eva > virtual_end)
			panic("pmap_remove: kva not in range");
#endif
		pte = kvtopte(sva);
		for (; sva < eva; sva += NBPG, pte++) {
			entry = pte->pt_entry;
			if (!(entry & PG_V))
				continue;
			if (entry & PG_WIRED)
				pmap->pm_stats.wired_count--;
			pmap->pm_stats.resident_count--;
			pmap_remove_pv(pmap, sva, entry & PG_FRAME);
#ifdef ATTR
			pmap_attributes[atop(entry & PG_FRAME)] = 0;
#endif
			pte->pt_entry = PG_NV;
			/*
			 * Flush the TLB for the given address.
			 */
			MachTLBFlushAddr(sva);
#ifdef DEBUG
			remove_stats.flushes++;
#endif
		}
		return;
	}

#ifdef DIAGNOSTIC
	if (eva > VM_MAXUSER_ADDRESS)
		panic("pmap_remove: uva not in range");
#endif
	while (sva < eva) {
		nssva = pmax_trunc_seg(sva) + NBSEG;
		if (nssva == 0 || nssva > eva)
			nssva = eva;
		/*
		 * If VA belongs to an unallocated segment,
		 * skip to the next segment boundary.
		 */
		if (!(pte = pmap_segmap(pmap, sva))) {
			sva = nssva;
			continue;
		}
		/*
		 * Invalidate every valid mapping within this segment.
		 */
		pte += (sva >> PGSHIFT) & (NPTEPG - 1);
		for (; sva < nssva; sva += NBPG, pte++) {
			entry = pte->pt_entry;
			if (!(entry & PG_V))
				continue;
			if (entry & PG_WIRED)
				pmap->pm_stats.wired_count--;
			pmap->pm_stats.resident_count--;
			pmap_remove_pv(pmap, sva, entry & PG_FRAME);
#ifdef ATTR
			pmap_attributes[atop(entry & PG_FRAME)] = 0;
#endif
			pte->pt_entry = PG_NV;
			/*
			 * Flush the TLB for the given address.
			 */
			if (pmap->pm_tlbgen == tlbpid_gen) {
				MachTLBFlushAddr(sva | (pmap->pm_tlbpid <<
					VMMACH_TLB_PID_SHIFT));
#ifdef DEBUG
				remove_stats.flushes++;
#endif
			}
		}
	}
}
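/*
 * Illustrative sketch (not in the original source): the user-space loops
 * in pmap_remove() above and pmap_protect() below resolve an address
 * through the two-level structure by hand.  For a hypothetical va, the
 * per-page lookup and the segment stepping are:
 */
#ifdef notdef
	if ((pte = pmap_segmap(pmap, va)) != NULL) {
		pte += (va >> PGSHIFT) & (NPTEPG - 1);	/* PTE within page */
		entry = pte->pt_entry;
	}
	/* next segment boundary, used to skip unmapped segments: */
	nssva = pmax_trunc_seg(va) + NBSEG;
#endif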
/*
 *	pmap_page_protect:
 *
 *	Lower the permission for all mappings to a given page.
 */
void
pmap_page_protect(pa, prot)
	vm_offset_t pa;
	vm_prot_t prot;
{
	register pv_entry_t pv;
	register vm_offset_t va;
	int s;

#ifdef DEBUG
	if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
	    (prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE)))
		printf("pmap_page_protect(%x, %x)\n", pa, prot);
#endif
	if (!IS_VM_PHYSADDR(pa))
		return;

	switch (prot) {
	case VM_PROT_READ|VM_PROT_WRITE:
	case VM_PROT_ALL:
		break;

	/* copy_on_write */
	case VM_PROT_READ:
	case VM_PROT_READ|VM_PROT_EXECUTE:
		pv = pa_to_pvh(pa);
		s = splimp();
		/*
		 * Loop over all current mappings setting/clearing as apropos.
		 */
		if (pv->pv_pmap != NULL) {
			for (; pv; pv = pv->pv_next) {
				extern vm_offset_t pager_sva, pager_eva;

				va = pv->pv_va;

				/*
				 * XXX don't write protect pager mappings
				 */
				if (va >= pager_sva && va < pager_eva)
					continue;
				pmap_protect(pv->pv_pmap, va, va + PAGE_SIZE,
					prot);
			}
		}
		splx(s);
		break;

	/* remove_all */
	default:
		pv = pa_to_pvh(pa);
		s = splimp();
		while (pv->pv_pmap != NULL) {
			pmap_remove(pv->pv_pmap, pv->pv_va,
				    pv->pv_va + PAGE_SIZE);
		}
		splx(s);
	}
}
/*
 *	Set the physical protection on the
 *	specified range of this map as requested.
 */
void
pmap_protect(pmap, sva, eva, prot)
	register pmap_t pmap;
	vm_offset_t sva, eva;
	vm_prot_t prot;
{
	register vm_offset_t nssva;
	register pt_entry_t *pte;
	register unsigned entry;
	u_int p;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
		printf("pmap_protect(%x, %x, %x, %x)\n", pmap, sva, eva, prot);
#endif
	if (pmap == NULL)
		return;

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		pmap_remove(pmap, sva, eva);
		return;
	}

	p = (prot & VM_PROT_WRITE) ? PG_RW : PG_RO;

	if (!pmap->pm_segtab) {
		/*
		 * Change entries in kernel pmap.
		 * This will trap if the page is writable (in order to set
		 * the dirty bit) even if the dirty bit is already set. The
		 * optimization isn't worth the effort since this code isn't
		 * executed much. The common case is to make a user page
		 * read-only.
		 */
#ifdef DIAGNOSTIC
		if (sva < VM_MIN_KERNEL_ADDRESS || eva > virtual_end)
			panic("pmap_protect: kva not in range");
#endif
		pte = kvtopte(sva);
		for (; sva < eva; sva += NBPG, pte++) {
			entry = pte->pt_entry;
			if (!(entry & PG_V))
				continue;
			entry = (entry & ~(PG_M | PG_RO)) | p;
			pte->pt_entry = entry;
			/*
			 * Update the TLB if the given address is in the cache.
			 */
			MachTLBUpdate(sva, entry);
		}
		return;
	}

#ifdef DIAGNOSTIC
	if (eva > VM_MAXUSER_ADDRESS)
		panic("pmap_protect: uva not in range");
#endif
	while (sva < eva) {
		nssva = pmax_trunc_seg(sva) + NBSEG;
		if (nssva == 0 || nssva > eva)
			nssva = eva;
		/*
		 * If VA belongs to an unallocated segment,
		 * skip to the next segment boundary.
		 */
		if (!(pte = pmap_segmap(pmap, sva))) {
			sva = nssva;
			continue;
		}
		/*
		 * Change protection on every valid mapping within this segment.
		 */
		pte += (sva >> PGSHIFT) & (NPTEPG - 1);
		for (; sva < nssva; sva += NBPG, pte++) {
			entry = pte->pt_entry;
			if (!(entry & PG_V))
				continue;
			entry = (entry & ~(PG_M | PG_RO)) | p;
			pte->pt_entry = entry;
			/*
			 * Update the TLB if the given address is in the cache.
			 */
			if (pmap->pm_tlbgen == tlbpid_gen)
				MachTLBUpdate(sva | (pmap->pm_tlbpid <<
					VMMACH_TLB_PID_SHIFT), entry);
		}
	}
}
/*
 *	Insert the given physical page (p) at
 *	the specified virtual address (v) in the
 *	target physical map with the protection requested.
 *
 *	If specified, the page will be wired down, meaning
 *	that the related pte can not be reclaimed.
 *
 *	NB:  This is the only routine which MAY NOT lazy-evaluate
 *	or lose information.  That is, this routine must actually
 *	insert this page into the given map NOW.
 */
void
pmap_enter(pmap, va, pa, prot, wired)
	register pmap_t pmap;
	vm_offset_t va;
	register vm_offset_t pa;
	vm_prot_t prot;
	boolean_t wired;
{
	register pt_entry_t *pte;
	register u_int npte;
	register int i, j;
	vm_page_t mem;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
		printf("pmap_enter(%x, %x, %x, %x, %x)\n",
		       pmap, va, pa, prot, wired);
#endif
#ifdef DIAGNOSTIC
	if (!pmap)
		panic("pmap_enter: pmap");
	if (!pmap->pm_segtab) {
		enter_stats.kernel++;
		if (va < VM_MIN_KERNEL_ADDRESS || va >= virtual_end)
			panic("pmap_enter: kva");
	} else {
		enter_stats.user++;
		if (va >= VM_MAXUSER_ADDRESS)
			panic("pmap_enter: uva");
	}
	if (pa & 0x80000000)
		panic("pmap_enter: pa");
	if (!(prot & VM_PROT_READ))
		panic("pmap_enter: prot");
#endif

	if (IS_VM_PHYSADDR(pa)) {
		register pv_entry_t pv, npv;
		int s;

		if (!(prot & VM_PROT_WRITE))
			npte = PG_RO;
		else {
			register vm_page_t mem;

			mem = PHYS_TO_VM_PAGE(pa);
			if ((int)va < 0) {
				/*
				 * Don't bother to trap on kernel writes,
				 * just record page as dirty.
				 */
				npte = PG_M;
				mem->flags &= ~PG_CLEAN;
			} else
#ifdef ATTR
			if ((pmap_attributes[atop(pa)] &
			    PMAP_ATTR_MOD) || !(mem->flags & PG_CLEAN))
#else
			if (!(mem->flags & PG_CLEAN))
#endif
				npte = PG_M;
			else
				npte = 0;
		}

#ifdef DEBUG
		enter_stats.managed++;
#endif
		/*
		 * Enter the pmap and virtual address into the
		 * physical to virtual map table.
		 */
		pv = pa_to_pvh(pa);
		s = splimp();
#ifdef DEBUG
		if (pmapdebug & PDB_ENTER)
			printf("pmap_enter: pv %x: was %x/%x/%x\n",
			       pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
#endif
		if (pv->pv_pmap == NULL) {
			/*
			 * No entries yet, use header as the first entry
			 */
#ifdef DEBUG
			if (pmapdebug & PDB_PVENTRY)
				printf("pmap_enter: first pv: pmap %x va %x\n",
					pmap, va);
			enter_stats.firstpv++;
#endif
			pv->pv_va = va;
			pv->pv_pmap = pmap;
			pv->pv_next = NULL;
		} else {
			/*
			 * There is at least one other VA mapping this page.
			 * Place this entry after the header.
			 *
			 * Note: the entry may already be in the table if
			 * we are only changing the protection bits.
			 */
			for (npv = pv; npv; npv = npv->pv_next)
				if (pmap == npv->pv_pmap && va == npv->pv_va) {
#ifdef DIAGNOSTIC
					unsigned entry;

					if (!pmap->pm_segtab)
						entry = kvtopte(va)->pt_entry;
					else {
						pte = pmap_segmap(pmap, va);
						if (pte) {
							pte += (va >> PGSHIFT) &
							    (NPTEPG - 1);
							entry = pte->pt_entry;
						} else
							entry = 0;
					}
					if (!(entry & PG_V) ||
					    (entry & PG_FRAME) != pa)
						printf(
			"pmap_enter: found va %x pa %x in pv_table but != %x\n",
							va, pa, entry);
#endif
					goto fnd;
				}
#ifdef DEBUG
			if (pmapdebug & PDB_PVENTRY)
				printf("pmap_enter: new pv: pmap %x va %x\n",
					pmap, va);
#endif
			/* can this cause us to recurse forever? */
			npv = (pv_entry_t)
				malloc(sizeof *npv, M_VMPVENT, M_NOWAIT);
			npv->pv_va = va;
			npv->pv_pmap = pmap;
			npv->pv_next = pv->pv_next;
			pv->pv_next = npv;
#ifdef DEBUG
			if (!npv->pv_next)
				enter_stats.secondpv++;
#endif
		fnd:
			;
		}
		splx(s);
	} else {
		/*
		 * Assumption: if it is not part of our managed memory
		 * then it must be device memory which may be volatile.
		 */
#ifdef DEBUG
		enter_stats.unmanaged++;
#endif
		npte = (prot & VM_PROT_WRITE) ? (PG_M | PG_N) : (PG_RO | PG_N);
	}

	/*
	 * The only time we need to flush the cache is if we
	 * execute from a physical address and then change the data.
	 * This is the best place to do this.
	 * pmap_protect() and pmap_remove() are mostly used to switch
	 * between R/W and R/O pages.
	 * NOTE: we only support cache flush for read only text.
	 */
	if (prot == (VM_PROT_READ | VM_PROT_EXECUTE))
		MachFlushICache(MACH_PHYS_TO_CACHED(pa), PAGE_SIZE);

	if (!pmap->pm_segtab) {
		/* enter entries into kernel pmap */
		pte = kvtopte(va);
		npte |= pa | PG_V | PG_G;
		if (wired) {
			pmap->pm_stats.wired_count += pmaxpagesperpage;
			npte |= PG_WIRED;
		}
		i = pmaxpagesperpage;
		do {
			if (!(pte->pt_entry & PG_V)) {
				pmap->pm_stats.resident_count++;
			} else {
#ifdef DIAGNOSTIC
				if (pte->pt_entry & PG_WIRED)
					panic("pmap_enter: kernel wired");
#endif
			}
			/*
			 * Update the same virtual address entry.
			 */
			MachTLBUpdate(va, npte);
			pte->pt_entry = npte;
			va += NBPG;
			npte += NBPG;
			pte++;
		} while (--i != 0);
		return;
	}

	if (!(pte = pmap_segmap(pmap, va))) {
		mem = vm_page_alloc1();
		pmap_zero_page(VM_PAGE_TO_PHYS(mem));
		pmap_segmap(pmap, va) = pte = (pt_entry_t *)
			MACH_PHYS_TO_CACHED(VM_PAGE_TO_PHYS(mem));
	}
	pte += (va >> PGSHIFT) & (NPTEPG - 1);

	/*
	 * Now validate mapping with desired protection/wiring.
	 * Assume uniform modified and referenced status for all
	 * PMAX pages in a MACH page.
	 */
	npte |= pa | PG_V;
	if (wired) {
		pmap->pm_stats.wired_count += pmaxpagesperpage;
		npte |= PG_WIRED;
	}
#ifdef DEBUG
	if (pmapdebug & PDB_ENTER) {
		printf("pmap_enter: new pte %x", npte);
		if (pmap->pm_tlbgen == tlbpid_gen)
			printf(" tlbpid %d", pmap->pm_tlbpid);
		printf("\n");
	}
#endif
	i = pmaxpagesperpage;
	do {
		pte->pt_entry = npte;
		if (pmap->pm_tlbgen == tlbpid_gen)
			MachTLBUpdate(va | (pmap->pm_tlbpid <<
				VMMACH_TLB_PID_SHIFT), npte);
		va += NBPG;
		npte += NBPG;
		pte++;
	} while (--i != 0);
}
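/*
 * Illustrative sketch (not in the original source): a MACH page
 * (PAGE_SIZE) may span several hardware pages (NBPG), so the entry
 * loops above step the VA and the PTE contents by NBPG,
 * pmaxpagesperpage times.  Because the frame number occupies the high
 * bits of a PTE, adding NBPG to npte advances it to the next physical
 * page:
 */
#ifdef notdef
	i = pmaxpagesperpage;		/* PAGE_SIZE / NBPG */
	do {
		pte->pt_entry = npte;
		va += NBPG;		/* next hardware page of the VA */
		npte += NBPG;		/* next physical frame in the PTE */
		pte++;
	} while (--i != 0);
#endif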
/*
 *	Routine:	pmap_change_wiring
 *	Function:	Change the wiring attribute for a map/virtual-address
 *			pair.
 *	In/out conditions:
 *			The mapping must already exist in the pmap.
 */
void
pmap_change_wiring(pmap, va, wired)
	register pmap_t pmap;
	vm_offset_t va;
	boolean_t wired;
{
	register pt_entry_t *pte;
	u_int p;
	register int i;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_WIRING))
		printf("pmap_change_wiring(%x, %x, %x)\n", pmap, va, wired);
#endif
	if (pmap == NULL)
		return;

	p = wired ? PG_WIRED : 0;

	/*
	 * Don't need to flush the TLB since PG_WIRED is only in software.
	 */
	if (!pmap->pm_segtab) {
		/* change entries in kernel pmap */
#ifdef DIAGNOSTIC
		if (va < VM_MIN_KERNEL_ADDRESS || va >= virtual_end)
			panic("pmap_change_wiring");
#endif
		pte = kvtopte(va);
	} else {
		if (!(pte = pmap_segmap(pmap, va)))
			return;
		pte += (va >> PGSHIFT) & (NPTEPG - 1);
	}

	i = pmaxpagesperpage;
	if (!(pte->pt_entry & PG_WIRED) && p)
		pmap->pm_stats.wired_count += i;
	else if ((pte->pt_entry & PG_WIRED) && !p)
		pmap->pm_stats.wired_count -= i;
	do {
		if (pte->pt_entry & PG_V)
			pte->pt_entry = (pte->pt_entry & ~PG_WIRED) | p;
		pte++;
	} while (--i != 0);
}

/*
 *	Routine:	pmap_extract
 *	Function:
 *		Extract the physical page address associated
 *		with the given map/virtual_address pair.
 */
vm_offset_t
pmap_extract(pmap, va)
	register pmap_t pmap;
	vm_offset_t va;
{
	register vm_offset_t pa;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_extract(%x, %x) -> ", pmap, va);
#endif

	if (!pmap->pm_segtab) {
#ifdef DIAGNOSTIC
		if (va < VM_MIN_KERNEL_ADDRESS || va >= virtual_end)
			panic("pmap_extract");
#endif
		pa = kvtopte(va)->pt_entry & PG_FRAME;
	} else {
		register pt_entry_t *pte;

		if (!(pte = pmap_segmap(pmap, va)))
			pa = 0;
		else {
			pte += (va >> PGSHIFT) & (NPTEPG - 1);
			pa = pte->pt_entry & PG_FRAME;
		}
	}
	if (pa)
		pa |= va & PGOFSET;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_extract: pa %x\n", pa);
#endif
	return (pa);
}
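/*
 * Illustrative sketch (not in the original source): pmap_extract()
 * composes its result from the frame bits of the PTE plus the page
 * offset of the VA, and returns 0 when there is no mapping.  A caller
 * probing a mapping might do:
 */
#ifdef notdef
	vm_offset_t pa;

	if ((pa = pmap_extract(pmap, va)) != 0)
		/* pa == (pt_entry & PG_FRAME) | (va & PGOFSET) */;
#endif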
/*
 *	Copy the range specified by src_addr/len
 *	from the source map to the range dst_addr/len
 *	in the destination map.
 *
 *	This routine is only advisory and need not do anything.
 */
void
pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
	pmap_t dst_pmap;
	pmap_t src_pmap;
	vm_offset_t dst_addr;
	vm_size_t len;
	vm_offset_t src_addr;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_copy(%x, %x, %x, %x, %x)\n",
		       dst_pmap, src_pmap, dst_addr, len, src_addr);
#endif
}

/*
 *	Require that all active physical maps contain no
 *	incorrect entries NOW.  [This update includes
 *	forcing updates of any address map caching.]
 *
 *	Generally used to ensure that a thread about
 *	to run will see a semantically correct world.
 */
void
pmap_update()
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_update()\n");
#endif
}

/*
 *	Routine:	pmap_collect
 *	Function:
 *		Garbage collects the physical map system for
 *		pages which are no longer used.
 *		Success need not be guaranteed -- that is, there
 *		may well be pages which are not referenced, but
 *		others may be collected.
 *	Usage:
 *		Called by the pageout daemon when pages are scarce.
 */
void
pmap_collect(pmap)
	pmap_t pmap;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_collect(%x)\n", pmap);
#endif
}

/*
 *	pmap_zero_page zeros the specified (machine independent)
 *	page.
 */
void
pmap_zero_page(phys)
	vm_offset_t phys;
{
	register int *p, *end;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_zero_page(%x)\n", phys);
#endif
	p = (int *)MACH_PHYS_TO_CACHED(phys);
	end = p + PAGE_SIZE / sizeof(int);
	do {
		p[0] = 0;
		p[1] = 0;
		p[2] = 0;
		p[3] = 0;
		p += 4;
	} while (p != end);
}
/*
 *	pmap_copy_page copies the specified (machine independent)
 *	page.
 */
void
pmap_copy_page(src, dst)
	vm_offset_t src, dst;
{
	register int *s, *d, *end;
	register int tmp0, tmp1, tmp2, tmp3;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_copy_page(%x, %x)\n", src, dst);
#endif
	s = (int *)MACH_PHYS_TO_CACHED(src);
	d = (int *)MACH_PHYS_TO_CACHED(dst);
	end = s + PAGE_SIZE / sizeof(int);
	do {
		tmp0 = s[0];
		tmp1 = s[1];
		tmp2 = s[2];
		tmp3 = s[3];
		d[0] = tmp0;
		d[1] = tmp1;
		d[2] = tmp2;
		d[3] = tmp3;
		s += 4;
		d += 4;
	} while (s != end);
}

/*
 *	Routine:	pmap_pageable
 *	Function:
 *		Make the specified pages (by pmap, offset)
 *		pageable (or not) as requested.
 *
 *		A page which is not pageable may not take
 *		a fault; therefore, its page table entry
 *		must remain valid for the duration.
 *
 *		This routine is merely advisory; pmap_enter
 *		will specify that these pages are to be wired
 *		down (or not) as appropriate.
 */
void
pmap_pageable(pmap, sva, eva, pageable)
	pmap_t pmap;
	vm_offset_t sva, eva;
	boolean_t pageable;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_pageable(%x, %x, %x, %x)\n",
		       pmap, sva, eva, pageable);
#endif
}

/*
 *	Clear the modify bits on the specified physical page.
 */
void
pmap_clear_modify(pa)
	vm_offset_t pa;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_clear_modify(%x)\n", pa);
#endif
#ifdef ATTR
	pmap_attributes[atop(pa)] &= ~PMAP_ATTR_MOD;
#endif
}

/*
 *	pmap_clear_reference:
 *
 *	Clear the reference bit on the specified physical page.
 */
void
pmap_clear_reference(pa)
	vm_offset_t pa;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_clear_reference(%x)\n", pa);
#endif
#ifdef ATTR
	pmap_attributes[atop(pa)] &= ~PMAP_ATTR_REF;
#endif
}
/*
 *	pmap_is_referenced:
 *
 *	Return whether or not the specified physical page is referenced
 *	by any physical maps.
 */
boolean_t
pmap_is_referenced(pa)
	vm_offset_t pa;
{
#ifdef ATTR
	return (pmap_attributes[atop(pa)] & PMAP_ATTR_REF);
#else
	return (FALSE);
#endif
}

/*
 *	pmap_is_modified:
 *
 *	Return whether or not the specified physical page is modified
 *	by any physical maps.
 */
boolean_t
pmap_is_modified(pa)
	vm_offset_t pa;
{
#ifdef ATTR
	return (pmap_attributes[atop(pa)] & PMAP_ATTR_MOD);
#else
	return (FALSE);
#endif
}

vm_offset_t
pmap_phys_address(ppn)
	int ppn;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_phys_address(%x)\n", ppn);
#endif
	return (pmax_ptob(ppn));
}

/*
 * Miscellaneous support routines
 */

/*
 * Allocate a hardware PID and return it.
 * It takes almost as much or more time to search the TLB for a
 * specific PID and flush those entries as it does to flush the entire TLB.
 * Therefore, when we allocate a new PID, we just take the next number. When
 * we run out of numbers, we flush the TLB, increment the generation count
 * and start over. PID zero is reserved for kernel use.
 * This is called only by switch().
 */
int
pmap_alloc_tlbpid(p)
	register struct proc *p;
{
	register pmap_t pmap;
	register int id;

	pmap = &p->p_vmspace->vm_pmap;
	if (pmap->pm_tlbgen != tlbpid_gen) {
		id = tlbpid_cnt;
		if (id == VMMACH_NUM_PIDS) {
			MachTLBFlush();
			/* reserve tlbpid_gen == 0 to always mean invalid */
			if (++tlbpid_gen == 0)
				tlbpid_gen = 1;
			id = 1;
		}
		tlbpid_cnt = id + 1;
		pmap->pm_tlbpid = id;
		pmap->pm_tlbgen = tlbpid_gen;
	} else
		id = pmap->pm_tlbpid;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_TLBPID)) {
		if (curproc)
			printf("pmap_alloc_tlbpid: curproc %d '%s' ",
				curproc->p_pid, curproc->p_comm);
		else
			printf("pmap_alloc_tlbpid: curproc <none> ");
		printf("segtab %x tlbpid %d pid %d '%s'\n",
			pmap->pm_segtab, id, p->p_pid, p->p_comm);
	}
#endif
	return (id);
}
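/*
 * Illustrative sketch (not in the original source): the generation
 * scheme above makes PID invalidation essentially free.  A pmap's PID
 * is usable only while pm_tlbgen == tlbpid_gen; bumping tlbpid_gen
 * after a full MachTLBFlush() lazily invalidates every PID at once,
 * which is why the rest of this file guards per-address TLB operations
 * like so:
 */
#ifdef notdef
	if (pmap->pm_tlbgen == tlbpid_gen)
		MachTLBFlushAddr(va | (pmap->pm_tlbpid <<
			VMMACH_TLB_PID_SHIFT));
	/* otherwise the whole TLB was already flushed; nothing to do */
#endif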
/*
 *	Remove a physical to virtual address translation.
 */
void
pmap_remove_pv(pmap, va, pa)
	pmap_t pmap;
	vm_offset_t va, pa;
{
	register pv_entry_t pv, npv;
	int s;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_PVENTRY))
		printf("pmap_remove_pv(%x, %x, %x)\n", pmap, va, pa);
#endif
	/*
	 * Remove page from the PV table (raise IPL since we
	 * may be called at interrupt time).
	 */
	if (!IS_VM_PHYSADDR(pa))
		return;
	pv = pa_to_pvh(pa);
	s = splimp();
	/*
	 * If it is the first entry on the list, it is actually
	 * in the header and we must copy the following entry up
	 * to the header.  Otherwise we must search the list for
	 * the entry.  In either case we free the now unused entry.
	 */
	if (pmap == pv->pv_pmap && va == pv->pv_va) {
		npv = pv->pv_next;
		if (npv) {
			*pv = *npv;
			free((caddr_t)npv, M_VMPVENT);
		} else
			pv->pv_pmap = NULL;
#ifdef DEBUG
		remove_stats.pvfirst++;
#endif
	} else {
		for (npv = pv->pv_next; npv; pv = npv, npv = npv->pv_next) {
#ifdef DEBUG
			remove_stats.pvsearch++;
#endif
			if (pmap == npv->pv_pmap && va == npv->pv_va)
				goto fnd;
		}
#ifdef DIAGNOSTIC
		printf("pmap_remove_pv(%x, %x, %x) not found\n", pmap, va, pa);
		panic("pmap_remove_pv");
#endif
	fnd:
		pv->pv_next = npv->pv_next;
		free((caddr_t)npv, M_VMPVENT);
	}
	splx(s);
}
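/*
 * Illustrative sketch (not in the original source): because the first
 * pv_entry is embedded in the pv_table header rather than allocated,
 * removal has two shapes, as the comment in pmap_remove_pv() above
 * describes.  For a hypothetical header 'pv' whose first entry matches:
 */
#ifdef notdef
	npv = pv->pv_next;
	if (npv) {
		*pv = *npv;			/* copy successor into header */
		free((caddr_t)npv, M_VMPVENT);	/* and free the node */
	} else
		pv->pv_pmap = NULL;		/* list is now empty */
#endif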
/*
 *	vm_page_alloc1:
 *
 *	Allocate and return a memory cell with no associated object.
 */
vm_page_t
vm_page_alloc1()
{
	register vm_page_t mem;
	int spl;

	spl = splimp();				/* XXX */
	simple_lock(&vm_page_queue_free_lock);
	if (vm_page_queue_free.tqh_first == NULL) {
		simple_unlock(&vm_page_queue_free_lock);
		splx(spl);
		return (NULL);
	}

	mem = vm_page_queue_free.tqh_first;
	TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);

	cnt.v_free_count--;
	simple_unlock(&vm_page_queue_free_lock);
	splx(spl);

	mem->flags = PG_BUSY | PG_CLEAN | PG_FAKE;
	mem->wire_count = 0;

	/*
	 * Decide if we should poke the pageout daemon.
	 * We do this if the free count is less than the low
	 * water mark, or if the free count is less than the high
	 * water mark (but above the low water mark) and the inactive
	 * count is less than its target.
	 *
	 * We don't have the counts locked ... if they change a little,
	 * it doesn't really matter.
	 */

	if (cnt.v_free_count < cnt.v_free_min ||
	    (cnt.v_free_count < cnt.v_free_target &&
	     cnt.v_inactive_count < cnt.v_inactive_target))
		thread_wakeup((int)&vm_pages_needed);
	return (mem);
}

/*
 *	vm_page_free1:
 *
 *	Returns the given page to the free list,
 *	disassociating it from any VM object.
 *
 *	Object and page must be locked prior to entry.
 */
void
vm_page_free1(mem)
	register vm_page_t mem;
{

	if (mem->flags & PG_ACTIVE) {
		TAILQ_REMOVE(&vm_page_queue_active, mem, pageq);
		mem->flags &= ~PG_ACTIVE;
		cnt.v_active_count--;
	}

	if (mem->flags & PG_INACTIVE) {
		TAILQ_REMOVE(&vm_page_queue_inactive, mem, pageq);
		mem->flags &= ~PG_INACTIVE;
		cnt.v_inactive_count--;
	}

	if (!(mem->flags & PG_FICTITIOUS)) {
		int spl;

		spl = splimp();
		simple_lock(&vm_page_queue_free_lock);
		TAILQ_INSERT_TAIL(&vm_page_queue_free, mem, pageq);

		cnt.v_free_count++;
		simple_unlock(&vm_page_queue_free_lock);
		splx(spl);
	}
}