/*	$NetBSD: pmap.h,v 1.29 2010/02/09 22:51:13 jym Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgment:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Frank van der Linden for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC.
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * pmap.h: see pmap.c for the history of this pmap module.
 */

#ifndef _X86_PMAP_H_
#define _X86_PMAP_H_

#define ptei(VA)	(((VA_SIGN_POS(VA)) & L1_MASK) >> L1_SHIFT)

/*
 * pl*_pi: index in the ptp page for a pde mapping a VA.
 * (pl*_i below is the index in the virtual array of all pdes per level)
 */
#define pl1_pi(VA)	(((VA_SIGN_POS(VA)) & L1_MASK) >> L1_SHIFT)
#define pl2_pi(VA)	(((VA_SIGN_POS(VA)) & L2_MASK) >> L2_SHIFT)
#define pl3_pi(VA)	(((VA_SIGN_POS(VA)) & L3_MASK) >> L3_SHIFT)
#define pl4_pi(VA)	(((VA_SIGN_POS(VA)) & L4_MASK) >> L4_SHIFT)

/*
 * pl*_i: generate index into pde/pte arrays in virtual space
 */
#define pl1_i(VA)	(((VA_SIGN_POS(VA)) & L1_FRAME) >> L1_SHIFT)
#define pl2_i(VA)	(((VA_SIGN_POS(VA)) & L2_FRAME) >> L2_SHIFT)
#define pl3_i(VA)	(((VA_SIGN_POS(VA)) & L3_FRAME) >> L3_SHIFT)
#define pl4_i(VA)	(((VA_SIGN_POS(VA)) & L4_FRAME) >> L4_SHIFT)
#define pl_i(va, lvl) \
	(((VA_SIGN_POS(va)) & ptp_masks[(lvl)-1]) >> ptp_shifts[(lvl)-1])

#define	pl_i_roundup(va, lvl)	pl_i((va)+ ~ptp_masks[(lvl)-1], (lvl))
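
/*
 * Illustrative sketch (not part of the API): the difference between the
 * _pi and _i forms, assuming the usual amd64 values (L1_SHIFT == 12,
 * 9 index bits per level).  For a low, canonical (positive) VA:
 *
 *	pl1_pi(va) == (va >> 12) & 511	slot of the PTE within its own
 *					PT page
 *	pl1_i(va)  == (va >> 12)	position of the PTE in the linear
 *					recursive-mapping window
 *
 * vtopte() below relies on the latter: it returns PTE_BASE + pl1_i(va).
 */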

/*
 * PTP macros:
 *   a PTP's index is the PD index of the PDE that points to it
 *   a PTP's offset is the byte-offset in the PTE space that this PTP is at
 *   a PTP's VA is the first VA mapped by that PTP
 */

#define ptp_va2o(va, lvl)	(pl_i(va, (lvl)+1) * PAGE_SIZE)
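
/*
 * Example (informal sketch): a level-1 PTP holding the PTE for "va" is
 * itself selected by the level-2 index, so
 *
 *	ptp_va2o(va, 1) == pl2_i(va) * PAGE_SIZE
 *
 * i.e. the byte offset of that PTP within the linear PTE space.
 */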

/* size of a PDP: usually one page, except for PAE */
#ifdef PAE
#define PDP_SIZE 4
#else
#define PDP_SIZE 1
#endif


#if defined(_KERNEL)
/*
 * pmap data structures: see pmap.c for details of locking.
 */

/*
 * we maintain a list of all non-kernel pmaps
 */

LIST_HEAD(pmap_head, pmap); /* struct pmap_head: head of a pmap list */

/*
 * the pmap structure
 *
 * note that the pm_obj contains the simple_lock, the reference count,
 * page list, and number of PTPs within the pmap.
 *
 * pm_lock is the same as the spinlock for vm object 0.  Changes to
 * the other objects may only be made if that lock has been taken
 * (the other object locks are only used when uvm_pagealloc is called)
 *
 * XXX If we ever support processor numbers higher than 31, we'll have
 * XXX to rethink the CPU mask.
 */

struct pmap {
	struct uvm_object pm_obj[PTP_LEVELS-1];	/* objects for lvl >= 1 */
#define	pm_lock	pm_obj[0].vmobjlock
	LIST_ENTRY(pmap) pm_list;	/* list (lck by pm_list lock) */
	pd_entry_t *pm_pdir;		/* VA of PD (lck by object lock) */
#ifdef PAE
	paddr_t pm_pdirpa[PDP_SIZE];
#else
	paddr_t pm_pdirpa;		/* PA of PD (read-only after create) */
#endif
	struct vm_page *pm_ptphint[PTP_LEVELS-1];
					/* pointer to a PTP in our pmap */
	struct pmap_statistics pm_stats;  /* pmap stats (lck by object lock) */

#if !defined(__x86_64__)
	vaddr_t pm_hiexec;		/* highest executable mapping */
#endif /* !defined(__x86_64__) */
	int pm_flags;			/* see below */

	union descriptor *pm_ldt;	/* user-set LDT */
	size_t pm_ldt_len;		/* size of LDT in bytes */
	int pm_ldt_sel;			/* LDT selector */
	uint32_t pm_cpus;		/* mask of CPUs using pmap */
	uint32_t pm_kernel_cpus;	/* mask of CPUs using kernel part
					   of pmap */
};

/* macro to access pm_pdirpa */
#ifdef PAE
#define pmap_pdirpa(pmap, index) \
	((pmap)->pm_pdirpa[l2tol3(index)] + l2tol2(index) * sizeof(pd_entry_t))
#else
#define pmap_pdirpa(pmap, index) \
	((pmap)->pm_pdirpa + (index) * sizeof(pd_entry_t))
#endif
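
/*
 * Example (sketch): the physical address of the top-level PDE mapping
 * "va" can be obtained as
 *
 *	pmap_pdirpa(pmap, pl_i(va, PTP_LEVELS))
 *
 * Under PAE the page directory consists of PDP_SIZE separate pages that
 * need not be physically contiguous, hence the l2tol3()/l2tol2() split
 * above; otherwise the PD is a single page and a plain offset suffices.
 */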

/*
 * MD flags that we use for pmap_enter and pmap_kenter_pa:
 */
#define PMAP_NOCACHE	0x01000000	/* set the non-cacheable bit */

/*
 * global kernel variables
 */

/* PDPpaddr: the physical address of the kernel's PDP */
extern u_long PDPpaddr;

extern int pmap_pg_g;			/* do we support PG_G? */
extern long nkptp[PTP_LEVELS];

/*
 * macros
 */

#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define pmap_clear_modify(pg)		pmap_clear_attrs(pg, PG_M)
#define pmap_clear_reference(pg)	pmap_clear_attrs(pg, PG_U)
#define pmap_copy(DP,SP,D,L,S)
#define pmap_is_modified(pg)		pmap_test_attrs(pg, PG_M)
#define pmap_is_referenced(pg)		pmap_test_attrs(pg, PG_U)
#define pmap_move(DP,SP,D,L,S)
#define pmap_phys_address(ppn)		x86_ptob(ppn)
#define pmap_valid_entry(E)		((E) & PG_V) /* is PDE or PTE valid? */


/*
 * prototypes
 */

void		pmap_activate(struct lwp *);
void		pmap_bootstrap(vaddr_t);
bool		pmap_clear_attrs(struct vm_page *, unsigned);
void		pmap_deactivate(struct lwp *);
void		pmap_page_remove(struct vm_page *);
void		pmap_remove(struct pmap *, vaddr_t, vaddr_t);
bool		pmap_test_attrs(struct vm_page *, unsigned);
void		pmap_write_protect(struct pmap *, vaddr_t, vaddr_t, vm_prot_t);
void		pmap_load(void);
paddr_t		pmap_init_tmp_pgtbl(paddr_t);
void		pmap_remove_all(struct pmap *);
void		pmap_ldt_sync(struct pmap *);

void		pmap_emap_enter(vaddr_t, paddr_t, vm_prot_t);
void		pmap_emap_remove(vaddr_t, vsize_t);
void		pmap_emap_sync(bool);

vaddr_t reserve_dumppages(vaddr_t); /* XXX: not a pmap fn */

void	pmap_tlb_shootdown(pmap_t, vaddr_t, vaddr_t, pt_entry_t);
void	pmap_tlb_shootwait(void);

#define	__HAVE_PMAP_EMAP

#define PMAP_GROWKERNEL	/* turn on pmap_growkernel interface */
#define PMAP_FORK	/* turn on pmap_fork interface */

/*
 * Do idle page zero'ing uncached to avoid polluting the cache.
 */
bool	pmap_pageidlezero(paddr_t);
#define	PMAP_PAGEIDLEZERO(pa)	pmap_pageidlezero((pa))

/*
 * inline functions
 */

/*
 * pmap_update_pg: flush one page from the TLB (or flush the whole thing
 * if hardware doesn't support one-page flushing)
 */

__inline static void __unused
pmap_update_pg(vaddr_t va)
{
	invlpg(va);
}

/*
 * pmap_update_2pg: flush two pages from the TLB
 */

__inline static void __unused
pmap_update_2pg(vaddr_t va, vaddr_t vb)
{
	invlpg(va);
	invlpg(vb);
}

/*
 * pmap_page_protect: change the protection of all recorded mappings
 *	of a managed page
 *
 * => this function is a frontend for pmap_page_remove/pmap_clear_attrs
 * => we only have to worry about making the page more protected.
 *	unprotecting a page is done on-demand at fault time.
 */

__inline static void __unused
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			(void) pmap_clear_attrs(pg, PG_RW);
		} else {
			pmap_page_remove(pg);
		}
	}
}

/*
 * pmap_protect: change the protection of pages in a pmap
 *
 * => this function is a frontend for pmap_remove/pmap_write_protect
 * => we only have to worry about making the page more protected.
 *	unprotecting a page is done on-demand at fault time.
 */

__inline static void __unused
pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			pmap_write_protect(pmap, sva, eva, prot);
		} else {
			pmap_remove(pmap, sva, eva);
		}
	}
}

/*
 * various address inlines
 *
 *  vtopte: return a pointer to the PTE mapping a VA, works only for
 *  user and PT addresses
 *
 *  kvtopte: return a pointer to the PTE mapping a kernel VA
 */

#include <lib/libkern/libkern.h>

static __inline pt_entry_t * __unused
vtopte(vaddr_t va)
{

	KASSERT(va < VM_MIN_KERNEL_ADDRESS);

	return (PTE_BASE + pl1_i(va));
}

static __inline pt_entry_t * __unused
kvtopte(vaddr_t va)
{
	pd_entry_t *pde;

	KASSERT(va >= VM_MIN_KERNEL_ADDRESS);

	pde = L2_BASE + pl2_i(va);
	if (*pde & PG_PS)
		return ((pt_entry_t *)pde);

	return (PTE_BASE + pl1_i(va));
}
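
/*
 * Example (sketch, illustration only): inspecting the PTE behind a
 * kernel VA:
 *
 *	pt_entry_t *pte = kvtopte(va);
 *	if (pmap_valid_entry(*pte))
 *		pa = (*pte) & PG_FRAME;
 *
 * For a VA covered by a large (PG_PS) mapping, kvtopte() returns the
 * PDE itself cast to a pt_entry_t pointer, so callers must be prepared
 * for that case; ordinary consumers should use vtophys() below instead.
 */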

paddr_t vtophys(vaddr_t);
vaddr_t	pmap_map(vaddr_t, paddr_t, paddr_t, vm_prot_t);
void	pmap_cpu_init_early(struct cpu_info *);
void	pmap_cpu_init_late(struct cpu_info *);
bool	sse2_idlezero_page(void *);


#ifdef XEN

#define XPTE_MASK	L1_FRAME
/* XPTE_SHIFT = L1_SHIFT - log2(sizeof(pt_entry_t)) */
#if defined(__x86_64__) || defined(PAE)
#define XPTE_SHIFT	9
#else
#define XPTE_SHIFT	10
#endif

/* PTE access inline functions */

/*
 * Get the machine address of the pte pointed to.
 * We use the hardware MMU to look up the value, so this works only for
 * levels 1-3.
 */

static __inline paddr_t
xpmap_ptetomach(pt_entry_t *pte)
{
	pt_entry_t *up_pte;
	vaddr_t va = (vaddr_t) pte;

	va = ((va & XPTE_MASK) >> XPTE_SHIFT) | (vaddr_t) PTE_BASE;
	up_pte = (pt_entry_t *) va;

	return (paddr_t) (((*up_pte) & PG_FRAME) +
	    (((vaddr_t) pte) & (~PG_FRAME & ~VA_SIGN_MASK)));
}

/*
 * xpmap_update()
 * Update an active pt entry with Xen
 * Equivalent to *pte = npte
 */

static __inline void
xpmap_update(pt_entry_t *pte, pt_entry_t npte)
{
	int s = splvm();

	xpq_queue_pte_update(xpmap_ptetomach(pte), npte);
	xpq_flush_queue();
	splx(s);
}


/* Xen helpers to change bits of a pte */
#define XPMAP_UPDATE_DIRECT	1	/* Update direct map entry flags too */

/* pmap functions with machine addresses */
void	pmap_kenter_ma(vaddr_t, paddr_t, vm_prot_t, u_int);
int	pmap_enter_ma(struct pmap *, vaddr_t, paddr_t, paddr_t,
	    vm_prot_t, u_int, int);
bool	pmap_extract_ma(pmap_t, vaddr_t, paddr_t *);

paddr_t	vtomach(vaddr_t);
#define vtomfn(va) (vtomach(va) >> PAGE_SHIFT)

#endif	/* XEN */

/*
 * Hooks for the pool allocator.
 */
#define	POOL_VTOPHYS(va)	vtophys((vaddr_t) (va))

/*
 * TLB shootdown mailbox.
 */

struct pmap_mbox {
	volatile void		*mb_pointer;
	volatile uintptr_t	mb_addr1;
	volatile uintptr_t	mb_addr2;
	volatile uintptr_t	mb_head;
	volatile uintptr_t	mb_tail;
	volatile uintptr_t	mb_global;
};

#endif /* _KERNEL */

#endif /* _X86_PMAP_H_ */