/*	$NetBSD: pmap.h,v 1.61 2002/09/22 07:53:40 chs Exp $	*/

/*
 * Copyright (c) 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC.
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994,1995 Mark Brinicombe.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_ARM32_PMAP_H_
#define	_ARM32_PMAP_H_

#ifdef _KERNEL

#include <arm/cpuconf.h>
#include <arm/cpufunc.h>
#include <arm/arm32/pte.h>
#include <uvm/uvm_object.h>

/*
 * A pmap describes a process's 4GB virtual address space.  This
 * virtual address space can be broken up into 4096 1MB regions which
 * are described by L1 PTEs in the L1 table.
 *
 * There is a line drawn at KERNEL_BASE.  Everything below that line
 * changes when the VM context is switched.  Everything above that line
 * is the same no matter which VM context is running.  This is achieved
 * by making the L1 PTEs for those slots above KERNEL_BASE reference
 * kernel L2 tables.
 *
 * The L2 tables are mapped linearly starting at PTE_BASE.  PTE_BASE
 * is below KERNEL_BASE, which means that the current process's PTEs
 * are always available starting at PTE_BASE.  Another region of KVA
 * above KERNEL_BASE, APTE_BASE, is reserved for mapping in the PTEs
 * of another process, should we need to manipulate them.
 *
 * The basic layout of the virtual address space thus looks like this:
 *
 *	0xffffffff
 *	.
 *	.
 *	.
 *	KERNEL_BASE
 *	--------------------
 *	PTE_BASE
 *	.
 *	.
 *	.
 *	0x00000000
 */
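
/*
 * As a worked illustration of the two-level lookup described above
 * (the numbers are chosen purely for the example, and KERNEL_BASE is
 * assumed to be 0xc0000000 here), the MMU splits a 32-bit virtual
 * address like this for a small (4KB) page mapping:
 *
 *	va = 0xc0123456
 *	    L1 index = va >> 20           = 0xc01   (selects the 1MB slot)
 *	    L2 index = (va >> 12) & 0xff  = 0x23    (selects the 4KB page)
 *	    offset   = va & 0xfff         = 0x456   (offset within the page)
 *
 * The L1 entry for slot 0xc01 points at a 1K coarse L2 table; the L2
 * index selects one of its 256 small-page entries.
 */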

/*
 * The pmap structure itself.
 */
struct pmap {
	struct uvm_object	pm_obj;		/* uvm_object */
#define	pm_lock	pm_obj.vmobjlock
	LIST_ENTRY(pmap)	pm_list;	/* list (lck by pm_list lock) */
	pd_entry_t		*pm_pdir;	/* KVA of page directory */
	struct l1pt		*pm_l1pt;	/* L1 table metadata */
	paddr_t			pm_pptpt;	/* PA of pt's page table */
	vaddr_t			pm_vptpt;	/* VA of pt's page table */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	struct vm_page		*pm_ptphint;	/* recently used PT */
};

typedef struct pmap *pmap_t;

/*
 * Physical / virtual address structure.  In a number of places (particularly
 * during bootstrapping) we need to keep track of the physical and virtual
 * addresses of various pages.
 */
typedef struct pv_addr {
	SLIST_ENTRY(pv_addr) pv_list;
	paddr_t pv_pa;
	vaddr_t pv_va;
} pv_addr_t;

/*
 * Determine various modes for PTEs (user vs. kernel, cacheable
 * vs. non-cacheable).
 */
#define	PTE_KERNEL	0
#define	PTE_USER	1
#define	PTE_NOCACHE	0
#define	PTE_CACHE	1

/*
 * Flags that indicate attributes of pages or mappings of pages.
 *
 * The PVF_MOD and PVF_REF flags are stored in the mdpage for each
 * page.  PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual
 * pv_entry's for each page.  They live in the same "namespace" so
 * that we can clear multiple attributes at a time.
 *
 * Note the "non-cacheable" flag generally means the page has
 * multiple mappings in a given address space.
 */
#define	PVF_MOD		0x01	/* page is modified */
#define	PVF_REF		0x02	/* page is referenced */
#define	PVF_WIRED	0x04	/* mapping is wired */
#define	PVF_WRITE	0x08	/* mapping is writable */
#define	PVF_EXEC	0x10	/* mapping is executable */
#define	PVF_NC		0x20	/* mapping is non-cacheable */

/*
 * Commonly referenced structures
 */
extern struct pmap	kernel_pmap_store;
extern int		pmap_debug_level; /* Only exists if PMAP_DEBUG */

/*
 * Macros that we need to export
 */
#define	pmap_kernel()			(&kernel_pmap_store)
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define	pmap_is_modified(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_MOD) != 0)
#define	pmap_is_referenced(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_REF) != 0)

#define	pmap_copy(dp, sp, da, l, sa)	/* nothing */

static __inline void
pmap_remove_all(struct pmap *pmap)
{
	/* Nothing. */
}

#define	pmap_phys_address(ppn)	(arm_ptob((ppn)))

/*
 * Functions that we need to export
 */
vaddr_t	pmap_map(vaddr_t, vaddr_t, vaddr_t, int);
void	pmap_procwr(struct proc *, vaddr_t, int);

#define	PMAP_NEED_PROCWR
#define	PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */

/* Functions we use internally. */
void	pmap_bootstrap(pd_entry_t *, pv_addr_t);
void	pmap_debug(int);
int	pmap_handled_emulation(struct pmap *, vaddr_t);
int	pmap_modified_emulation(struct pmap *, vaddr_t);
void	pmap_postinit(void);

void	vector_page_setprot(int);

/* Bootstrapping routines. */
void	pmap_map_section(vaddr_t, vaddr_t, paddr_t, int, int);
void	pmap_map_entry(vaddr_t, vaddr_t, paddr_t, int, int);
vsize_t	pmap_map_chunk(vaddr_t, vaddr_t, paddr_t, vsize_t, int, int);
void	pmap_link_l2pt(vaddr_t, vaddr_t, pv_addr_t *);

/*
 * Special page zero routine for use by the idle loop (no cache cleans).
 */
boolean_t	pmap_pageidlezero __P((paddr_t));
#define	PMAP_PAGEIDLEZERO(pa)	pmap_pageidlezero((pa))

/*
 * The current top of kernel VM
 */
extern vaddr_t	pmap_curmaxkvaddr;

/*
 * Useful macros and constants
 */

/*
 * While the ARM MMU's L1 descriptors describe a 1M "section", each
 * one pointing to a 1K L2 table, NetBSD's VM system allocates the
 * page tables in 4K chunks, and thus we describe 4M "super sections".
 *
 * We'll lift terminology from another architecture and refer to this as
 * the "page directory" size.
 */
#define	PD_SIZE		(L1_S_SIZE * 4)		/* 4M */
#define	PD_OFFSET	(PD_SIZE - 1)
#define	PD_FRAME	(~PD_OFFSET)
#define	PD_SHIFT	22

/* Virtual address to page table entry */
#define	vtopte(va) \
	(((pt_entry_t *)PTE_BASE) + arm_btop((vaddr_t) (va)))

/* Virtual address to physical address */
#define	vtophys(va) \
	((*vtopte(va) & L2_S_FRAME) | ((vaddr_t) (va) & L2_S_OFFSET))

#define	PTE_SYNC(pte) \
	cpu_dcache_wb_range((vaddr_t)(pte), sizeof(pt_entry_t))
#define	PTE_FLUSH(pte) \
	cpu_dcache_wbinv_range((vaddr_t)(pte), sizeof(pt_entry_t))

#define	PTE_SYNC_RANGE(pte, cnt) \
	cpu_dcache_wb_range((vaddr_t)(pte), (cnt) << 2) /* * sizeof(pt_entry_t) */
#define	PTE_FLUSH_RANGE(pte, cnt) \
	cpu_dcache_wbinv_range((vaddr_t)(pte), (cnt) << 2) /* * sizeof(pt_entry_t) */
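
/*
 * The PTE_SYNC/PTE_FLUSH macros exist because the data cache is
 * write-back: a newly written PTE must be cleaned to memory before the
 * MMU's hardware table walk (which reads physical memory) can see it.
 * A minimal, illustrative sketch of entering a kernel mapping with
 * these macros might look like the following (an example only, not a
 * statement of what the pmap internals actually do; cpu_tlb_flushD_SE()
 * comes from <arm/cpufunc.h>):
 *
 *	pt_entry_t *pte = vtopte(va);
 *
 *	*pte = L2_S_PROTO | pa |
 *	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE) |
 *	    pte_l2_s_cache_mode;
 *	PTE_SYNC(pte);			clean the PTE itself to memory
 *	cpu_tlb_flushD_SE(va);		drop any stale TLB entry for va
 */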

#define	l1pte_valid(pde)	((pde) != 0)
#define	l1pte_section_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_S)
#define	l1pte_page_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_C)
#define	l1pte_fpage_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_F)

#define	l2pte_valid(pte)	((pte) != 0)
#define	l2pte_pa(pte)		((pte) & L2_S_FRAME)

/* L1 and L2 page table macros */
#define	pmap_pdei(v)		(((v) & L1_S_FRAME) >> L1_S_SHIFT)
#define	pmap_pde(m, v)		(&((m)->pm_pdir[pmap_pdei(v)]))

#define	pmap_pde_v(pde)		l1pte_valid(*(pde))
#define	pmap_pde_section(pde)	l1pte_section_p(*(pde))
#define	pmap_pde_page(pde)	l1pte_page_p(*(pde))
#define	pmap_pde_fpage(pde)	l1pte_fpage_p(*(pde))

#define	pmap_pte_v(pte)		l2pte_valid(*(pte))
#define	pmap_pte_pa(pte)	l2pte_pa(*(pte))


/* Size of the kernel part of the L1 page table */
#define	KERNEL_PD_SIZE	\
	(L1_TABLE_SIZE - (KERNEL_BASE >> L1_S_SHIFT) * sizeof(pd_entry_t))
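
/*
 * Taken together, the macros above let the page tables be walked in
 * software.  The following is an illustrative sketch only (locking and
 * error handling omitted; pm, va and pa are hypothetical locals) of how
 * a pmap and a virtual address could be resolved to a physical address:
 *
 *	pd_entry_t *pde = pmap_pde(pm, va);
 *
 *	if (!pmap_pde_v(pde))
 *		nothing is mapped at va
 *	else if (pmap_pde_section(pde))
 *		pa = (*pde & L1_S_FRAME) | (va & L1_S_OFFSET);
 *	else if (pmap_pde_page(pde)) {
 *		pt_entry_t *pte = vtopte(va);
 *		if (pmap_pte_v(pte))
 *			pa = pmap_pte_pa(pte) | (va & L2_S_OFFSET);
 *	}
 *
 * Note that vtopte() only works for the currently active pmap (and the
 * kernel), since it relies on the linear PTE mapping at PTE_BASE.
 */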

/************************* ARM MMU configuration *****************************/

#if ARM_MMU_GENERIC == 1
void	pmap_copy_page_generic(paddr_t, paddr_t);
void	pmap_zero_page_generic(paddr_t);

void	pmap_pte_init_generic(void);
#if defined(CPU_ARM9)
void	pmap_pte_init_arm9(void);
#endif /* CPU_ARM9 */
#endif /* ARM_MMU_GENERIC == 1 */

#if ARM_MMU_XSCALE == 1
void	pmap_copy_page_xscale(paddr_t, paddr_t);
void	pmap_zero_page_xscale(paddr_t);

void	pmap_pte_init_xscale(void);

void	xscale_setup_minidata(vaddr_t, vaddr_t, paddr_t);
#endif /* ARM_MMU_XSCALE == 1 */

extern pt_entry_t		pte_l1_s_cache_mode;
extern pt_entry_t		pte_l1_s_cache_mask;

extern pt_entry_t		pte_l2_l_cache_mode;
extern pt_entry_t		pte_l2_l_cache_mask;

extern pt_entry_t		pte_l2_s_cache_mode;
extern pt_entry_t		pte_l2_s_cache_mask;

extern pt_entry_t		pte_l2_s_prot_u;
extern pt_entry_t		pte_l2_s_prot_w;
extern pt_entry_t		pte_l2_s_prot_mask;

extern pt_entry_t		pte_l1_s_proto;
extern pt_entry_t		pte_l1_c_proto;
extern pt_entry_t		pte_l2_s_proto;

extern void (*pmap_copy_page_func)(paddr_t, paddr_t);
extern void (*pmap_zero_page_func)(paddr_t);

/*****************************************************************************/

/*
 * tell MI code that the cache is virtually-indexed *and* virtually-tagged.
 */
#define	PMAP_CACHE_VIVT

/*
 * These macros define the various bit masks in the PTE.
 *
 * We use these macros since we use different bits on different processor
 * models.
 */
#define	L1_S_PROT_U		(L1_S_AP(AP_U))
#define	L1_S_PROT_W		(L1_S_AP(AP_W))
#define	L1_S_PROT_MASK		(L1_S_PROT_U|L1_S_PROT_W)

#define	L1_S_CACHE_MASK_generic	(L1_S_B|L1_S_C)
#define	L1_S_CACHE_MASK_xscale	(L1_S_B|L1_S_C|L1_S_XSCALE_TEX(TEX_XSCALE_X))

#define	L2_L_PROT_U		(L2_AP(AP_U))
#define	L2_L_PROT_W		(L2_AP(AP_W))
#define	L2_L_PROT_MASK		(L2_L_PROT_U|L2_L_PROT_W)

#define	L2_L_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_L_CACHE_MASK_xscale	(L2_B|L2_C|L2_XSCALE_L_TEX(TEX_XSCALE_X))

#define	L2_S_PROT_U_generic	(L2_AP(AP_U))
#define	L2_S_PROT_W_generic	(L2_AP(AP_W))
#define	L2_S_PROT_MASK_generic	(L2_S_PROT_U|L2_S_PROT_W)

#define	L2_S_PROT_U_xscale	(L2_AP0(AP_U))
#define	L2_S_PROT_W_xscale	(L2_AP0(AP_W))
#define	L2_S_PROT_MASK_xscale	(L2_S_PROT_U|L2_S_PROT_W)

#define	L2_S_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_S_CACHE_MASK_xscale	(L2_B|L2_C|L2_XSCALE_T_TEX(TEX_XSCALE_X))

#define	L1_S_PROTO_generic	(L1_TYPE_S | L1_S_IMP)
#define	L1_S_PROTO_xscale	(L1_TYPE_S)

#define	L1_C_PROTO_generic	(L1_TYPE_C | L1_C_IMP2)
#define	L1_C_PROTO_xscale	(L1_TYPE_C)

#define	L2_L_PROTO		(L2_TYPE_L)

#define	L2_S_PROTO_generic	(L2_TYPE_S)
#define	L2_S_PROTO_xscale	(L2_TYPE_XSCALE_XS)

/*
 * User-visible names for the ones that vary with MMU class.
 */

#if ARM_NMMUS > 1
/* More than one MMU class configured; use variables. */
#define	L2_S_PROT_U		pte_l2_s_prot_u
#define	L2_S_PROT_W		pte_l2_s_prot_w
#define	L2_S_PROT_MASK		pte_l2_s_prot_mask

#define	L1_S_CACHE_MASK		pte_l1_s_cache_mask
#define	L2_L_CACHE_MASK		pte_l2_l_cache_mask
#define	L2_S_CACHE_MASK		pte_l2_s_cache_mask

#define	L1_S_PROTO		pte_l1_s_proto
#define	L1_C_PROTO		pte_l1_c_proto
#define	L2_S_PROTO		pte_l2_s_proto

#define	pmap_copy_page(s, d)	(*pmap_copy_page_func)((s), (d))
#define	pmap_zero_page(d)	(*pmap_zero_page_func)((d))
#elif ARM_MMU_GENERIC == 1
#define	L2_S_PROT_U		L2_S_PROT_U_generic
#define	L2_S_PROT_W		L2_S_PROT_W_generic
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_generic

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_generic
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_generic
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_generic

#define	L1_S_PROTO		L1_S_PROTO_generic
#define	L1_C_PROTO		L1_C_PROTO_generic
#define	L2_S_PROTO		L2_S_PROTO_generic

#define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_generic((d))
#elif ARM_MMU_XSCALE == 1
#define	L2_S_PROT_U		L2_S_PROT_U_xscale
#define	L2_S_PROT_W		L2_S_PROT_W_xscale
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_xscale

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_xscale
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_xscale
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_xscale

#define	L1_S_PROTO		L1_S_PROTO_xscale
#define	L1_C_PROTO		L1_C_PROTO_xscale
#define	L2_S_PROTO		L2_S_PROTO_xscale

#define	pmap_copy_page(s, d)	pmap_copy_page_xscale((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_xscale((d))
#endif /* ARM_NMMUS > 1 */

/*
 * These macros return various bits based on kernel/user and protection.
 * Note that the compiler will usually fold these at compile time.
 */
#define	L1_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L1_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : 0))

#define	L2_L_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_L_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : 0))

#define	L2_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : 0))
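
/*
 * For example, on a generic-MMU kernel,
 *
 *	L2_S_PROT(PTE_USER, VM_PROT_READ | VM_PROT_WRITE)
 *
 * folds to
 *
 *	L2_S_PROT_U | L2_S_PROT_W  ==  L2_AP(AP_U) | L2_AP(AP_W)
 *
 * i.e. a small-page AP field granting user access and write permission,
 * while L2_S_PROT(PTE_KERNEL, VM_PROT_READ) folds to 0, leaving the AP
 * field clear.
 */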

#endif	/* _KERNEL */

#endif	/* _ARM32_PMAP_H_ */