/*	$NetBSD: pmap.h,v 1.26 2022/11/03 18:55:07 skrll Exp $	*/

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Ralph Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.h	8.1 (Berkeley) 6/10/93
 */

/*
 * Copyright (c) 1987 Carnegie-Mellon University
 *
 * This code is derived from software contributed to Berkeley by
 * Ralph Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.h	8.1 (Berkeley) 6/10/93
 */

#ifdef _KERNEL_OPT
#include "opt_efi.h"
#endif

#ifndef _UVM_PMAP_PMAP_H_
#define _UVM_PMAP_PMAP_H_

#include <sys/rwlock.h>
#include <uvm/uvm_object.h>
#include <uvm/uvm_stat.h>

#ifdef UVMHIST
UVMHIST_DECL(pmapexechist);
UVMHIST_DECL(pmaphist);
UVMHIST_DECL(pmapxtabhist);
#endif

/*
 * Alternate mapping hooks for pool pages.  Avoids thrashing the TLB.
 */
struct vm_page *pmap_md_alloc_poolpage(int);

#if !defined(KASAN)
vaddr_t pmap_map_poolpage(paddr_t);
paddr_t pmap_unmap_poolpage(vaddr_t);
#define PMAP_ALLOC_POOLPAGE(flags)	pmap_md_alloc_poolpage(flags)
#define PMAP_MAP_POOLPAGE(pa)		pmap_map_poolpage(pa)
#define PMAP_UNMAP_POOLPAGE(va)		pmap_unmap_poolpage(va)

#if defined(_LP64)
#define PMAP_DIRECT
static __inline int
pmap_direct_process(paddr_t pa, voff_t pgoff, size_t len,
    int (*process)(void *, size_t, void *), void *arg)
{
	vaddr_t va = pmap_md_direct_map_paddr(pa);

	return process((void *)(va + pgoff), len, arg);
}
#endif
#endif
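
/*
 * Illustrative sketch only (not part of this header): with PMAP_DIRECT
 * defined, a caller can operate on physical memory through the direct
 * map, without building a temporary mapping, by handing
 * pmap_direct_process() a callback, roughly like
 *
 *	static int
 *	zero_cb(void *kva, size_t len, void *arg)
 *	{
 *		memset(kva, 0, len);
 *		return 0;
 *	}
 *
 *	error = pmap_direct_process(pa, pgoff, len, zero_cb, NULL);
 *
 * The callback and variable names above are hypothetical.
 */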

#define PMAP_MAP_PDETABPAGE(pa)		pmap_md_map_poolpage(pa, PAGE_SIZE)
#define PMAP_MAP_SEGTABPAGE(pa)		pmap_md_map_poolpage(pa, PAGE_SIZE)
#define PMAP_MAP_PTEPAGE(pa)		pmap_md_map_poolpage(pa, PAGE_SIZE)

/*
 * The user address space is mapped using a two-level structure where
 * virtual address bits 31..22 are used to index into a segment table,
 * which points to a page worth of PTEs (a 4096-byte page can hold 1024
 * PTEs).  Bits 21..12 are then used to index a PTE, which describes a
 * page within a segment.
 */
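
/*
 * Illustrative sketch only, assuming the usual machine-dependent
 * constants SEGSHIFT, SEGOFSET and PGSHIFT: a user virtual address
 * would be split roughly as
 *
 *	size_t segidx = va >> SEGSHIFT;			(segment table index)
 *	size_t pteidx = (va & SEGOFSET) >> PGSHIFT;	(PTE index in the page)
 *	pt_entry_t pte = stb->seg_ppg[segidx]->ppg_ptes[pteidx];
 *
 * where stb is the pmap's pmap_segtab_t (defined below).  The real
 * lookup, including the extra levels used on _LP64, is done by
 * pmap_pte_lookup().
 */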

#define pmap_trunc_seg(x)	((vaddr_t)(x) & ~SEGOFSET)
#define pmap_round_seg(x)	(((vaddr_t)(x) + SEGOFSET) & ~SEGOFSET)

/*
 * Each ptpage maps a "segment" worth of address space, that is,
 * NPTEPG * PAGE_SIZE bytes.
 */

typedef struct {
	pt_entry_t ppg_ptes[NPTEPG];
} pmap_ptpage_t;

#if defined(PMAP_HWPAGEWALKER)
typedef union pmap_pdetab {
	pd_entry_t		pde_pde[PMAP_PDETABSIZE];
	union pmap_pdetab *	pde_next;
} pmap_pdetab_t;
#endif
#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
typedef union pmap_segtab {
#ifdef _LP64
	union pmap_segtab *	seg_seg[PMAP_SEGTABSIZE];
#endif
	pmap_ptpage_t *		seg_ppg[PMAP_SEGTABSIZE];
#ifdef PMAP_HWPAGEWALKER
	pd_entry_t		seg_pde[PMAP_PDETABSIZE];
#endif
	union pmap_segtab *	seg_next;
} pmap_segtab_t;
#endif

#ifdef _KERNEL
struct pmap;
typedef bool (*pte_callback_t)(struct pmap *, vaddr_t, vaddr_t,
	pt_entry_t *, uintptr_t);

/*
 * Common part of bootstrapping the system enough to run with
 * virtual memory.
 */
void pmap_bootstrap_common(void);

pt_entry_t *pmap_pte_lookup(struct pmap *, vaddr_t);
pt_entry_t *pmap_pte_reserve(struct pmap *, vaddr_t, int);
void pmap_pte_process(struct pmap *, vaddr_t, vaddr_t, pte_callback_t,
	uintptr_t);
void pmap_segtab_activate(struct pmap *, struct lwp *);
void pmap_segtab_deactivate(struct pmap *);
void pmap_segtab_init(struct pmap *);
void pmap_segtab_destroy(struct pmap *, pte_callback_t, uintptr_t);
#ifdef PMAP_HWPAGEWALKER
pd_entry_t *pmap_pde_lookup(struct pmap *, vaddr_t, paddr_t *);
bool pmap_pdetab_fixup(struct pmap *, vaddr_t);
#endif
extern kmutex_t pmap_segtab_lock;
#endif /* _KERNEL */

#ifdef MULTIPROCESSOR
#include <sys/kcpuset.h>
#endif
#include <uvm/pmap/pmap_tlb.h>

/*
 * Machine-dependent pmap structure.
 */
struct pmap {
	struct uvm_object	pm_uobject;
#define pm_refcnt		pm_uobject.uo_refs	/* pmap reference count */
#define pm_pvp_list		pm_uobject.memq

	krwlock_t		pm_obj_lock;		/* lock for pm_uobject */
#define pm_lock			pm_uobject.vmobjlock

	struct pglist		pm_ppg_list;
#if defined(PMAP_HWPAGEWALKER)
	struct pglist		pm_pdetab_list;
#endif
#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
	struct pglist		pm_segtab_list;
#endif
#ifdef MULTIPROCESSOR
	kcpuset_t		*pm_active;		/* pmap was active on ... */
	kcpuset_t		*pm_onproc;		/* pmap is active on ... */
	volatile u_int		pm_shootdown_pending;
#endif
#if defined(PMAP_HWPAGEWALKER)
	pmap_pdetab_t *		pm_pdetab;		/* pointer to HW PDEs */
#endif
#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
	pmap_segtab_t *		pm_segtab;		/* pointers to pages of PTEs; or */
							/* virtual shadow of HW PDEs */
#endif
	u_int			pm_flags;
#define PMAP_DEFERRED_ACTIVATE	__BIT(0)
	struct pmap_statistics	pm_stats;		/* pmap statistics */
	vaddr_t			pm_minaddr;
	vaddr_t			pm_maxaddr;
#ifdef __HAVE_PMAP_MD
	struct pmap_md		pm_md;
#endif
	struct pmap_asid_info	pm_pai[1];
};

#ifdef _KERNEL
static inline void
pmap_lock(struct pmap *pm)
{

	rw_enter(pm->pm_lock, RW_WRITER);
}

static inline void
pmap_unlock(struct pmap *pm)
{

	rw_exit(pm->pm_lock);
}

struct pmap_kernel {
	struct pmap kernel_pmap;
#if defined(MULTIPROCESSOR) && PMAP_TLB_MAX > 1
	struct pmap_asid_info kernel_pai[PMAP_TLB_MAX-1];
#endif
};

struct pmap_limits {
	paddr_t avail_start;
	paddr_t avail_end;
	vaddr_t virtual_start;
	vaddr_t virtual_end;
};

/*
 * Initialize the kernel pmap.
 */
#ifdef MULTIPROCESSOR
#define PMAP_SIZE	offsetof(struct pmap, pm_pai[PMAP_TLB_MAX])
#else
#define PMAP_SIZE	sizeof(struct pmap)
#endif
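
/*
 * A note on PMAP_SIZE (a sketch, not a definition used by this header):
 * struct pmap ends in a one-element pm_pai[] array, and on
 * MULTIPROCESSOR kernels one pmap_asid_info is needed per TLB, so pmaps
 * are sized with PMAP_SIZE rather than sizeof(struct pmap).  The
 * pmap_pmap_pool declared below would typically be set up with something
 * like
 *
 *	pool_init(&pmap_pmap_pool, PMAP_SIZE, 0, 0, 0, "pmappl",
 *	    &pool_allocator_nointr, IPL_NONE);
 *
 * in the machine-independent pmap implementation; the exact call there
 * may differ.
 */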

/*
 * The pools from which pmap structures and sub-structures are allocated.
 */
extern struct pool pmap_pmap_pool;
extern struct pool pmap_pv_pool;
extern struct pool_allocator pmap_pv_page_allocator;

extern struct pmap_kernel kernel_pmap_store;
extern struct pmap_limits pmap_limits;

extern u_int pmap_page_colormask;

/*
 * The current top of kernel VM.
 */
extern vaddr_t pmap_curmaxkvaddr;

#if defined(PMAP_HWPAGEWALKER)
extern pmap_pdetab_t pmap_kern_pdetab;
#else
extern pmap_segtab_t pmap_kern_segtab;
#endif

#define pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)
#define pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)

bool pmap_remove_all(pmap_t);
void pmap_set_modified(paddr_t);
bool pmap_page_clear_attributes(struct vm_page_md *, u_int);
void pmap_page_set_attributes(struct vm_page_md *, u_int);
void pmap_pvlist_lock_init(size_t);
#ifdef PMAP_VIRTUAL_CACHE_ALIASES
void pmap_page_cache(struct vm_page_md *, bool);
#endif

#if defined(__HAVE_PMAP_PV_TRACK) && !defined(PMAP_PV_TRACK_ONLY_STUBS)
void pmap_pv_protect(paddr_t, vm_prot_t);
#endif

#define PMAP_WB		0
#define PMAP_WBINV	1
#define PMAP_INV	2

kmutex_t *pmap_pvlist_lock_addr(struct vm_page_md *);

#define PMAP_STEAL_MEMORY	/* enable pmap_steal_memory() */
#define PMAP_GROWKERNEL		/* enable pmap_growkernel() */

#define PMAP_COUNT(name)	(pmap_evcnt_##name.ev_count++ + 0)
#define PMAP_COUNTER(name, desc) \
struct evcnt pmap_evcnt_##name = \
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", desc); \
EVCNT_ATTACH_STATIC(pmap_evcnt_##name)

static inline pt_entry_t *
kvtopte(vaddr_t va)
{

	return pmap_pte_lookup(pmap_kernel(), va);
}

/* for ddb */
void pmap_db_pmap_print(struct pmap *,
    void (*)(const char *, ...) __printflike(1, 2));
void pmap_db_mdpg_print(struct vm_page *,
    void (*)(const char *, ...) __printflike(1, 2));

#if defined(EFI_RUNTIME)
struct pmap *
	pmap_efirt(void);

#define pmap_activate_efirt()		pmap_md_activate_efirt()
#define pmap_deactivate_efirt()		pmap_md_deactivate_efirt()

#endif

#endif /* _KERNEL */
#endif /* _UVM_PMAP_PMAP_H_ */