/*	$NetBSD: pmap.h,v 1.46 2001/06/04 15:36:00 ragge Exp $	*/

/*
 * Copyright (c) 1987 Carnegie-Mellon University
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * Changed for the VAX port. /IC
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.h	7.6 (Berkeley) 5/10/91
 */


#ifndef PMAP_H
#define PMAP_H

#include <machine/pte.h>
#include <machine/mtpr.h>
#include <machine/pcb.h>

/*
 * Some constants to make life easier.
 */
#define LTOHPS		(PGSHIFT - VAX_PGSHIFT)
#define LTOHPN		(1 << LTOHPS)
#define USRPTSIZE	((MAXTSIZ + MAXDSIZ + MAXSSIZ + MMAPSPACE) / VAX_NBPG)
#define NPTEPGS		(USRPTSIZE / (sizeof(struct pte) * LTOHPN))

/*
 * Pmap structure
 *  pm_stack holds lowest allocated memory for the process stack.
 */

typedef struct pmap {
	vaddr_t	pm_stack;	/* Base of alloced p1 pte space */
	int	ref_count;	/* reference count */
	struct pte *pm_p0br;	/* page 0 base register */
	long	pm_p0lr;	/* page 0 length register */
	struct pte *pm_p1br;	/* page 1 base register */
	long	pm_p1lr;	/* page 1 length register */
	struct simplelock pm_lock;	/* Lock entry in MP environment */
	struct pmap_statistics pm_stats;	/* Some statistics */
	u_char	pm_refcnt[NPTEPGS];	/* Refcount per pte page */
} *pmap_t;

/*
 * For each struct vm_page, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_table.
 */

struct pv_entry {
	struct pv_entry *pv_next;	/* next pv_entry */
	struct pte	*pv_pte;	/* pte for this physical page */
	struct pmap	*pv_pmap;	/* pmap this entry belongs to */
	int		 pv_attr;	/* write/modified bits */
};

/* Mapping macros used when allocating SPT */
#define MAPVIRT(ptr, count)				\
	(vaddr_t)ptr = virtual_avail;			\
	virtual_avail += (count) * VAX_NBPG;

#define MAPPHYS(ptr, count, perm)			\
	(vaddr_t)ptr = avail_start + KERNBASE;		\
	avail_start += (count) * VAX_NBPG;

#ifdef	_KERNEL

extern	struct pmap kernel_pmap_store;

#define pmap_kernel()			(&kernel_pmap_store)

#endif	/* _KERNEL */

/*
 * Real nice (fast) routines to get the virtual address of a physical page
 * (and vice versa).
 */
#define PMAP_MAP_POOLPAGE(pa)	((pa) | KERNBASE)
#define PMAP_UNMAP_POOLPAGE(va)	((va) & ~KERNBASE)

#define PMAP_STEAL_MEMORY

/*
 * This is by far the most used pmap routine.  Make it inline.
 */
__inline static boolean_t
pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
{
	paddr_t pa = 0;
	int	*pte, sva;

	/* Kernel (system) space is direct-mapped; translate directly. */
	if (va & KERNBASE) {
		pa = kvtophys(va); /* Is 0 if not mapped */
		if (pap)
			*pap = pa;
		if (pa)
			return (TRUE);
		return (FALSE);
	}

	sva = PG_PFNUM(va);
	if (va < 0x40000000) {
		/* P0 region: valid PTE indices run up to the length register. */
		if (sva > (pmap->pm_p0lr & ~AST_MASK))
			return FALSE;
		pte = (int *)pmap->pm_p0br;
	} else {
		/* P1 region grows downward; indices below the length register are invalid. */
		if (sva < pmap->pm_p1lr)
			return FALSE;
		pte = (int *)pmap->pm_p1br;
	}
	/* Check that the page holding the PTE itself is mapped before reading it. */
	if (kvtopte(&pte[sva])->pg_pfn) {
		if (pap)
			*pap = (pte[sva] & PG_FRAME) << VAX_PGSHIFT;
		return (TRUE);
	}
	return (FALSE);
}


/* Routines that are best to define as macros */
#define pmap_phys_address(phys)		((u_int)(phys) << PGSHIFT)
#define pmap_copy(a,b,c,d,e)		/* Don't do anything */
#define pmap_update()			/* nothing (yet) */
#define pmap_collect(pmap)		/* No need so far */
#define pmap_remove(pmap, start, slut)	pmap_protect(pmap, start, slut, 0)
#define pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define pmap_deactivate(p)		/* Don't do anything */
#define pmap_reference(pmap)		(pmap)->ref_count++

/* These can be done as efficient inline macros */
#define pmap_copy_page(src, dst)				\
	__asm__("addl3 $0x80000000,%0,r0;addl3 $0x80000000,%1,r1; \
	    movc3 $4096,(r0),(r1)"				\
	    :: "r"(src),"r"(dst):"r0","r1","r2","r3","r4","r5");

#define pmap_zero_page(phys)					\
	__asm__("addl3 $0x80000000,%0,r0;movc5 $0,(r0),$0,$4096,(r0)" \
	    :: "r"(phys): "r0","r1","r2","r3","r4","r5");

/* Prototypes */
void	pmap_bootstrap __P((void));
vaddr_t pmap_map __P((vaddr_t, vaddr_t, vaddr_t, int));

#endif /* PMAP_H */
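
/*
 * Usage sketch (illustrative only, not part of the pmap interface):
 * a caller that wants the physical address behind a virtual address
 * would use pmap_extract() roughly as below.  The names "map" and
 * "uva" are hypothetical and do not exist in this header.
 *
 *	paddr_t pa;
 *
 *	if (pmap_extract(map, uva, &pa))
 *		printf("va %#lx maps to pa %#lx\n", uva, pa);
 *	else
 *		printf("va %#lx is not mapped\n", uva);
 */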