/*	$NetBSD: pmap.h,v 1.80 2011/05/24 23:30:30 matt Exp $	*/

/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * Changed for the VAX port. /IC
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.h	7.6 (Berkeley) 5/10/91
 */

/*
 * Copyright (c) 1987 Carnegie-Mellon University
 *
 * Changed for the VAX port. /IC
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.h	7.6 (Berkeley) 5/10/91
 */

#ifndef PMAP_H
#define PMAP_H

#include <sys/atomic.h>

#include <uvm/uvm_page.h>

#include <machine/pte.h>
#include <machine/mtpr.h>
#include <machine/pcb.h>

/*
 * Some constants to make life easier.
 */
#define LTOHPS		(PGSHIFT - VAX_PGSHIFT)
#define LTOHPN		(1 << LTOHPS)

/*
 * Pmap structure
 */
struct pmap {
	struct pte	*pm_p1ap;	/* Base of allocated P1 pte space */
	u_int		 pm_count;	/* reference count */
	struct pcb	*pm_pcbs;	/* PCBs using this pmap */
	struct pte	*pm_p0br;	/* page 0 base register */
	long		 pm_p0lr;	/* page 0 length register */
	struct pte	*pm_p1br;	/* page 1 base register */
	long		 pm_p1lr;	/* page 1 length register */
	struct pmap_statistics	pm_stats;	/* Some statistics */
};

/*
 * For each struct vm_page, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_table.
 */
struct pv_entry {
	struct pv_entry	*pv_next;	/* next pv_entry */
	vaddr_t		 pv_vaddr;	/* virtual address of this mapping */
	struct pmap	*pv_pmap;	/* pmap this entry belongs to */
	int		 pv_attr;	/* write/modified bits */
};

extern struct pv_entry *pv_table;

/* Mapping macros used when allocating SPT */
#define MAPVIRT(ptr, count)					\
	ptr = virtual_avail;					\
	virtual_avail += (count) * VAX_NBPG;

#define MAPPHYS(ptr, count, perm)				\
	ptr = avail_start + KERNBASE;				\
	avail_start += (count) * VAX_NBPG;

/*
 * Fast routines to get the virtual address of a physical page
 * (and vice versa).
 */
#define PMAP_VTOPHYS(va)	((va) & ~KERNBASE)
#define PMAP_MAP_POOLPAGE(pa)	((pa) | KERNBASE)
#define PMAP_UNMAP_POOLPAGE(va) ((va) & ~KERNBASE)

#define PMAP_STEAL_MEMORY
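
/*
 * Illustrative sketch only (the function name is hypothetical and this is
 * not part of the pmap interface): pool pages live in the direct-mapped
 * system space, so the conversion macros above merely set or clear the
 * KERNBASE bit and never consult any page table.  Kept under #if 0 so it
 * is not compiled.
 */
#if 0
static __inline bool
pmap_example_poolpage_roundtrip(paddr_t pa)
{
	vaddr_t va = PMAP_MAP_POOLPAGE(pa);	/* va = pa | KERNBASE */

	/* Stripping KERNBASE again must give back the original address. */
	return PMAP_UNMAP_POOLPAGE(va) == pa && PMAP_VTOPHYS(va) == pa;
}
#endif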

/*
 * This is by far the most used pmap routine, so make it inline.
 */
static __inline bool
pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
{
	int	*pte, sva;

	if (va & KERNBASE) {
		paddr_t pa;

		pa = kvtophys(va);	/* Is 0 if not mapped */
		if (pap)
			*pap = pa;
		if (pa)
			return (true);
		return (false);
	}

	sva = PG_PFNUM(va);
	if (va < 0x40000000) {
		if (sva >= (pmap->pm_p0lr & ~AST_MASK))
			goto fail;
		pte = (int *)pmap->pm_p0br;
	} else {
		if (sva < pmap->pm_p1lr)
			goto fail;
		pte = (int *)pmap->pm_p1br;
	}
	/*
	 * Since the PTE tables are sparsely allocated, make sure the page
	 * table page actually exists before dereferencing the pte itself.
	 */
	if (kvtopte(&pte[sva])->pg_v && (pte[sva] & PG_FRAME)) {
		if (pap)
			*pap = (pte[sva] & PG_FRAME) << VAX_PGSHIFT;
		return (true);
	}
 fail:
	if (pap)
		*pap = 0;
	return (false);
}

bool	pmap_clear_modify_long(const struct pv_entry *);
bool	pmap_clear_reference_long(const struct pv_entry *);
bool	pmap_is_modified_long_p(const struct pv_entry *);
void	pmap_page_protect_long(struct pv_entry *, vm_prot_t);
void	pmap_protect_long(pmap_t, vaddr_t, vaddr_t, vm_prot_t);

static __inline struct pv_entry *
pmap_pg_to_pv(const struct vm_page *pg)
{
	return pv_table + (VM_PAGE_TO_PHYS(pg) >> PGSHIFT);
}

static __inline bool
pmap_is_referenced(struct vm_page *pg)
{
	const struct pv_entry * const pv = pmap_pg_to_pv(pg);

	return (pv->pv_attr & PG_V) != 0;
}

static __inline bool
pmap_clear_reference(struct vm_page *pg)
{
	struct pv_entry * const pv = pmap_pg_to_pv(pg);
	bool rv = (pv->pv_attr & PG_V) != 0;

	pv->pv_attr &= ~PG_V;
	if (pv->pv_pmap != NULL || pv->pv_next != NULL)
		rv |= pmap_clear_reference_long(pv);
	return rv;
}

static __inline bool
pmap_clear_modify(struct vm_page *pg)
{
	struct pv_entry * const pv = pmap_pg_to_pv(pg);
	bool rv = (pv->pv_attr & PG_M) != 0;

	pv->pv_attr &= ~PG_M;
	if (pv->pv_pmap != NULL || pv->pv_next != NULL)
		rv |= pmap_clear_modify_long(pv);
	return rv;
}

static __inline bool
pmap_is_modified(struct vm_page *pg)
{
	const struct pv_entry * const pv = pmap_pg_to_pv(pg);

	return (pv->pv_attr & PG_M) != 0 || pmap_is_modified_long_p(pv);
}

static __inline void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	struct pv_entry * const pv = pmap_pg_to_pv(pg);

	if (pv->pv_pmap != NULL || pv->pv_next != NULL)
		pmap_page_protect_long(pv, prot);
}

static __inline void
pmap_protect(pmap_t pmap, vaddr_t start, vaddr_t end, vm_prot_t prot)
{
	if (pmap->pm_p0lr != 0 || pmap->pm_p1lr != 0x200000 ||
	    (start & KERNBASE) != 0)
		pmap_protect_long(pmap, start, end, prot);
}
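
/*
 * Illustrative sketch only (hypothetical helper, not part of the pmap
 * interface): the usual calling pattern for pmap_extract() is to probe a
 * mapping and fall back gracefully when the virtual address has no valid
 * translation.  Kept under #if 0 so it is not compiled.
 */
#if 0
static __inline paddr_t
pmap_example_lookup(pmap_t pmap, vaddr_t va)
{
	paddr_t pa;

	if (!pmap_extract(pmap, va, &pa))
		return 0;	/* no valid mapping for va in this pmap */
	return pa;
}
#endif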

static __inline void
pmap_remove_all(struct pmap *pmap)
{
	/* Nothing. */
}

/* Routines that are best to define as macros */
#define pmap_phys_address(phys)		((u_int)(phys) << PGSHIFT)
#define pmap_copy(a,b,c,d,e)		/* Don't do anything */
#define pmap_update(pmap)		/* nothing (yet) */
#define pmap_remove(pmap, start, end)	pmap_protect(pmap, start, end, 0)
#define pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)
#define pmap_reference(pmap)		atomic_inc_uint(&(pmap)->pm_count)

/* These can be done as efficient inline macros */
#define pmap_copy_page(src, dst)			\
	__asm("addl3 $0x80000000,%0,%%r0;"		\
	    "addl3 $0x80000000,%1,%%r1;"		\
	    "movc3 $4096,(%%r0),(%%r1)"			\
	    :: "r"(src), "r"(dst)			\
	    : "r0","r1","r2","r3","r4","r5");

#define pmap_zero_page(phys)				\
	__asm("addl3 $0x80000000,%0,%%r0;"		\
	    "movc5 $0,(%%r0),$0,$4096,(%%r0)"		\
	    :: "r"(phys)				\
	    : "r0","r1","r2","r3","r4","r5");

/* Prototypes */
void	pmap_bootstrap(void);
vaddr_t pmap_map(vaddr_t, vaddr_t, vaddr_t, int);

#if 0
#define __HAVE_VM_PAGE_MD

struct vm_page_md {
	unsigned int md_attrs;
};

#define VM_MDPAGE_INIT(pg)	((pg)->mdpage.md_attrs = 0)
#endif

#endif /* PMAP_H */