/*	$NetBSD: pmap.h,v 1.58 2014/04/14 10:54:08 martin Exp $	*/

/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

#ifndef _LOCORE
#include <machine/pte.h>
#include <sys/queue.h>
#include <uvm/uvm_object.h>
#ifdef _KERNEL
#include <machine/cpuset.h>
#endif
#endif

/*
 * This scheme uses 2-level page tables in 32-bit mode and 3-level
 * page tables in 64-bit mode.
 *
 * While we're still in 32-bit mode we do the following:
 *
 *	offset:						13 bits
 *	1st level: 1024 64-bit TTEs in an 8K page for	10 bits
 *	2nd level: 512 32-bit pointers in the pmap for	 9 bits
 *							-------
 *	total:						32 bits
 *
 * In 64-bit mode the Spitfire and Blackbird CPUs support only
 * 44-bit virtual addresses.  All addresses between
 * 0x0000 07ff ffff ffff and 0xffff f800 0000 0000 are in the
 * "VA hole" and trap, so we don't have to track them.  However,
 * we do need to keep them in mind during PT walking.  If the size
 * of the address "hole" ever changes, we need to rework all the
 * page table handling.
 *
 *	offset:						13 bits
 *	1st level: 1024 64-bit TTEs in an 8K page for	10 bits
 *	2nd level: 1024 64-bit pointers in an 8K page for 10 bits
 *	3rd level: 1024 64-bit pointers in the segmap for 10 bits
 *							-------
 *	total:						43 bits
 *
 * Of course, this means for 32-bit spaces we always have a (practically)
 * wasted page for the segmap (only one entry used) and half a page wasted
 * for the page directory.  We still need one extra bit 8^(.
 */
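/*
 * Worked example (illustrative, not part of the original header):
 * with the 64-bit-mode 13/10/10/10 split above, the virtual address
 * 0x0000 0123 4567 89ab decomposes as
 *
 *	segmap index:	(va >> 33) & 0x3ff	= 0x091
 *	pdir index:	(va >> 23) & 0x3ff	= 0x28a
 *	ptbl index:	(va >> 13) & 0x3ff	= 0x33c
 *	page offset:	va & 0x1fff		= 0x09ab
 *
 * These shifts and masks are exactly the va_to_seg(), va_to_dir()
 * and va_to_pte() macros defined below.
 */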
#define HOLESHIFT	(43)

#define PTSZ	(PAGE_SIZE/8)		/* # of 64-bit PTEs in a page table */
#define PDSZ	(PTSZ)			/* # of entries in a page directory */
#define STSZ	(PTSZ)			/* # of entries in the segmap (psegs) */

#define PTSHIFT		(13)
#define PDSHIFT		(10+PTSHIFT)
#define STSHIFT		(10+PDSHIFT)

#define PTMASK		(PTSZ-1)
#define PDMASK		(PDSZ-1)
#define STMASK		(STSZ-1)

#ifndef _LOCORE

#ifdef _LP64
int	sparc64_mmap_range_test(vaddr_t, vaddr_t);
#define	MD_MMAP_RANGE_TEST(MINVA, MAXVA)	sparc64_mmap_range_test(MINVA, MAXVA)
#endif

/*
 * Support for big page sizes.  This maps the page size to the
 * page bits.
 */
struct page_size_map {
	uint64_t mask;
	uint64_t code;
#if defined(DEBUG) || 1	/* XXX: "use" statistics currently always enabled */
	uint64_t use;
#endif
};
extern struct page_size_map page_size_map[];

/*
 * Pmap stuff
 */

#define va_to_seg(v)	(int)((((paddr_t)(v))>>STSHIFT)&STMASK)
#define va_to_dir(v)	(int)((((paddr_t)(v))>>PDSHIFT)&PDMASK)
#define va_to_pte(v)	(int)((((paddr_t)(v))>>PTSHIFT)&PTMASK)

#ifdef MULTIPROCESSOR
#define PMAP_LIST_MAXNUMCPU	CPUSET_MAXNUMCPU
#else
#define PMAP_LIST_MAXNUMCPU	1
#endif

struct pmap {
	struct uvm_object pm_obj;
	kmutex_t pm_obj_lock;
#define pm_lock pm_obj.vmobjlock
#define pm_refs pm_obj.uo_refs
	LIST_ENTRY(pmap) pm_list[PMAP_LIST_MAXNUMCPU]; /* per-cpu ctx used list */

	struct pmap_statistics pm_stats;

	/*
	 * We record the context used on any cpu here.  If the context
	 * is actually present in the TLB, it will be the plain context
	 * number.  If the context is allocated, but has been flushed
	 * from the TLB, the number will be negative.
	 * If this pmap has no context allocated on that cpu, the entry
	 * will be 0.
	 */
	int pm_ctx[PMAP_LIST_MAXNUMCPU];	/* Current context per cpu */

	/*
	 * This contains 64-bit pointers to pages that contain
	 * 1024 64-bit pointers to page tables.  All addresses
	 * are physical.
	 *
	 * !!! Only touch this through pseg_get() and pseg_set() !!!
	 */
	paddr_t pm_physaddr;	/* physical address of pm_segs */
	int64_t *pm_segs;
};
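/*
 * Minimal sketch (illustrative only, not part of this header) of the
 * three-level walk that pseg_get() performs over pm_segs.  Here
 * ldxa_phys() is a hypothetical stand-in for a 64-bit load through a
 * physical address (ASI_PHYS_CACHED on sun4u); the real primitives
 * live in the pmap implementation.
 */
#if 0
int64_t	ldxa_phys(paddr_t);		/* hypothetical physical-address load */

static int64_t
pseg_walk_sketch(struct pmap *pm, vaddr_t va)
{
	paddr_t pa;

	/* 3rd level: segment map entry yields the pa of a page directory. */
	pa = (paddr_t)ldxa_phys((paddr_t)&pm->pm_segs[va_to_seg(va)]);
	if (pa == 0)
		return (0);

	/* 2nd level: page directory entry yields the pa of a page table. */
	pa = (paddr_t)ldxa_phys(pa + sizeof(int64_t) * va_to_dir(va));
	if (pa == 0)
		return (0);

	/* 1st level: the page table entry is the 64-bit TTE itself. */
	return (ldxa_phys(pa + sizeof(int64_t) * va_to_pte(va)));
}
#endif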
/*
 * This comes from the PROM and is used to map prom entries.
 */
struct prom_map {
	uint64_t	vstart;
	uint64_t	vsize;
	uint64_t	tte;
};

#define PMAP_NC		0x001	/* Set the E bit in the page */
#define PMAP_NVC	0x002	/* Don't enable the virtual cache */
#define PMAP_LITTLE	0x004	/* Map in little endian mode */
/* Large page size hints --
   we really should use another param to pmap_enter() */
#define PMAP_8K		0x000
#define PMAP_64K	0x008	/* Use 64K page */
#define PMAP_512K	0x010	/* Use 512K page */
#define PMAP_4M		0x018	/* Use 4M page */
#define PMAP_SZ_TO_TTE(x)	(((x)&0x018)<<58)
/* If this bit differs between two VAs mapping the same PA,
   the mappings alias in the D$ */
#define VA_ALIAS_MASK	(1 << 13)

#ifdef	_KERNEL
#ifdef PMAP_COUNT_DEBUG
/* diagnostic versions if PMAP_COUNT_DEBUG option is used */
int pmap_count_res(struct pmap *);
int pmap_count_wired(struct pmap *);
#define	pmap_resident_count(pm)		pmap_count_res((pm))
#define	pmap_wired_count(pm)		pmap_count_wired((pm))
#else
#define	pmap_resident_count(pm)		((pm)->pm_stats.resident_count)
#define	pmap_wired_count(pm)		((pm)->pm_stats.wired_count)
#endif

#define	pmap_phys_address(x)		(x)

void pmap_activate_pmap(struct pmap *);
void pmap_update(struct pmap *);
void pmap_bootstrap(u_long, u_long);

/* make sure all page mappings are modulo 16K to prevent d$ aliasing */
#define	PMAP_PREFER(fo, va, sz, td)	pmap_prefer((fo), (va), (td))
static inline void
pmap_prefer(vaddr_t fo, vaddr_t *va, int td)
{
	vaddr_t newva;
	vaddr_t m;

	m = 2 * PAGE_SIZE;
	newva = (*va & ~(m - 1)) | (fo & (m - 1));

	if (td) {
		if (newva > *va)
			newva -= m;
	} else {
		if (newva < *va)
			newva += m;
	}
	*va = newva;
}

#define	PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */
#define	PMAP_NEED_PROCWR

void pmap_procwr(struct proc *, vaddr_t, size_t);

/* SPARC specific? */
int	pmap_dumpsize(void);
int	pmap_dumpmmu(int (*)(dev_t, daddr_t, void *, size_t), daddr_t);
int	pmap_pa_exists(paddr_t);
void	switchexit(struct lwp *, int);
void	pmap_kprotect(vaddr_t, vm_prot_t);

/* SPARC64 specific */
void	pmap_copy_page_phys(paddr_t, paddr_t);
void	pmap_zero_page_phys(paddr_t);

#ifdef SUN4V
/* sun4v specific */
void	pmap_setup_intstack_sun4v(paddr_t);
void	pmap_setup_tsb_sun4v(void);
#endif

/* Installed physical memory, as discovered during bootstrap. */
extern int phys_installed_size;
extern struct mem_region *phys_installed;

#define	__HAVE_VM_PAGE_MD

/*
 * For each struct vm_page, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t.
 */
struct pmap;
typedef struct pv_entry {
	struct pv_entry	*pv_next;	/* next pv_entry */
	struct pmap	*pv_pmap;	/* pmap where mapping lies */
	vaddr_t		pv_va;		/* virtual address for mapping */
} *pv_entry_t;
/* PV flags encoded in the low bits of the VA of the first pv_entry */

struct vm_page_md {
	struct pv_entry mdpg_pvh;
};
#define	VM_MDPAGE_INIT(pg)						\
do {									\
	(pg)->mdpage.mdpg_pvh.pv_next = NULL;				\
	(pg)->mdpage.mdpg_pvh.pv_pmap = NULL;				\
	(pg)->mdpage.mdpg_pvh.pv_va = 0;				\
} while (/*CONSTCOND*/0)

#endif	/* _KERNEL */

#endif	/* _LOCORE */
#endif	/* _MACHINE_PMAP_H_ */
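/*
 * Worked example for pmap_prefer() above (illustrative, not part of
 * the original header): with PAGE_SIZE 8K, m is 16K (0x4000).  For a
 * file offset fo = 0x6000, a candidate va = 0x10000 and td = 0,
 * newva = (0x10000 & ~0x3fff) | (0x6000 & 0x3fff) = 0x12000; that is
 * already >= the candidate, so *va becomes 0x12000.  Bit 13 of the
 * result (VA_ALIAS_MASK) matches bit 13 of fo, so mappings placed at
 * the preferred addresses never alias in the data cache.
 */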