1*aaa8efb3Smiod /* $OpenBSD: pmap.h,v 1.57 2024/11/07 08:12:12 miod Exp $ */ 2e1e4f5b1Sdrahn /* $NetBSD: pmap.h,v 1.76 2003/09/06 09:10:46 rearnsha Exp $ */ 3e1e4f5b1Sdrahn 4e1e4f5b1Sdrahn /* 5e1e4f5b1Sdrahn * Copyright (c) 2002, 2003 Wasabi Systems, Inc. 6e1e4f5b1Sdrahn * All rights reserved. 7e1e4f5b1Sdrahn * 8e1e4f5b1Sdrahn * Written by Jason R. Thorpe & Steve C. Woodford for Wasabi Systems, Inc. 9e1e4f5b1Sdrahn * 10e1e4f5b1Sdrahn * Redistribution and use in source and binary forms, with or without 11e1e4f5b1Sdrahn * modification, are permitted provided that the following conditions 12e1e4f5b1Sdrahn * are met: 13e1e4f5b1Sdrahn * 1. Redistributions of source code must retain the above copyright 14e1e4f5b1Sdrahn * notice, this list of conditions and the following disclaimer. 15e1e4f5b1Sdrahn * 2. Redistributions in binary form must reproduce the above copyright 16e1e4f5b1Sdrahn * notice, this list of conditions and the following disclaimer in the 17e1e4f5b1Sdrahn * documentation and/or other materials provided with the distribution. 18e1e4f5b1Sdrahn * 3. All advertising materials mentioning features or use of this software 19e1e4f5b1Sdrahn * must display the following acknowledgement: 20e1e4f5b1Sdrahn * This product includes software developed for the NetBSD Project by 21e1e4f5b1Sdrahn * Wasabi Systems, Inc. 22e1e4f5b1Sdrahn * 4. The name of Wasabi Systems, Inc. may not be used to endorse 23e1e4f5b1Sdrahn * or promote products derived from this software without specific prior 24e1e4f5b1Sdrahn * written permission. 25e1e4f5b1Sdrahn * 26e1e4f5b1Sdrahn * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 27e1e4f5b1Sdrahn * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 28e1e4f5b1Sdrahn * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 29e1e4f5b1Sdrahn * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL WASABI SYSTEMS, INC 30e1e4f5b1Sdrahn * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 31e1e4f5b1Sdrahn * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 32e1e4f5b1Sdrahn * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 33e1e4f5b1Sdrahn * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 34e1e4f5b1Sdrahn * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 35e1e4f5b1Sdrahn * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 36e1e4f5b1Sdrahn * POSSIBILITY OF SUCH DAMAGE. 37e1e4f5b1Sdrahn */ 38e1e4f5b1Sdrahn 39e1e4f5b1Sdrahn /* 40e1e4f5b1Sdrahn * Copyright (c) 1994,1995 Mark Brinicombe. 41e1e4f5b1Sdrahn * All rights reserved. 42e1e4f5b1Sdrahn * 43e1e4f5b1Sdrahn * Redistribution and use in source and binary forms, with or without 44e1e4f5b1Sdrahn * modification, are permitted provided that the following conditions 45e1e4f5b1Sdrahn * are met: 46e1e4f5b1Sdrahn * 1. Redistributions of source code must retain the above copyright 47e1e4f5b1Sdrahn * notice, this list of conditions and the following disclaimer. 48e1e4f5b1Sdrahn * 2. Redistributions in binary form must reproduce the above copyright 49e1e4f5b1Sdrahn * notice, this list of conditions and the following disclaimer in the 50e1e4f5b1Sdrahn * documentation and/or other materials provided with the distribution. 51e1e4f5b1Sdrahn * 3. All advertising materials mentioning features or use of this software 52e1e4f5b1Sdrahn * must display the following acknowledgement: 53e1e4f5b1Sdrahn * This product includes software developed by Mark Brinicombe 54e1e4f5b1Sdrahn * 4. The name of the author may not be used to endorse or promote products 55e1e4f5b1Sdrahn * derived from this software without specific prior written permission. 
56e1e4f5b1Sdrahn * 57e1e4f5b1Sdrahn * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 58e1e4f5b1Sdrahn * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 59e1e4f5b1Sdrahn * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 60e1e4f5b1Sdrahn * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 61e1e4f5b1Sdrahn * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 62e1e4f5b1Sdrahn * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 63e1e4f5b1Sdrahn * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 64e1e4f5b1Sdrahn * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 65e1e4f5b1Sdrahn * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 66e1e4f5b1Sdrahn * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 67e1e4f5b1Sdrahn */ 68e1e4f5b1Sdrahn 692fa72412Spirofti #ifndef _ARM_PMAP_H_ 702fa72412Spirofti #define _ARM_PMAP_H_ 71e1e4f5b1Sdrahn 72e1e4f5b1Sdrahn #ifdef _KERNEL 73e1e4f5b1Sdrahn 74e1e4f5b1Sdrahn #include <arm/cpuconf.h> 75e1e4f5b1Sdrahn #include <arm/pte.h> 76e1e4f5b1Sdrahn #ifndef _LOCORE 77e1e4f5b1Sdrahn #include <arm/cpufunc.h> 78e1e4f5b1Sdrahn #endif 79e1e4f5b1Sdrahn 80e1e4f5b1Sdrahn /* 81e1e4f5b1Sdrahn * a pmap describes a processes' 4GB virtual address space. this 82e1e4f5b1Sdrahn * virtual address space can be broken up into 4096 1MB regions which 83e1e4f5b1Sdrahn * are described by L1 PTEs in the L1 table. 84e1e4f5b1Sdrahn * 85e1e4f5b1Sdrahn * There is a line drawn at KERNEL_BASE. Everything below that line 86e1e4f5b1Sdrahn * changes when the VM context is switched. Everything above that line 87e1e4f5b1Sdrahn * is the same no matter which VM context is running. This is achieved 88e1e4f5b1Sdrahn * by making the L1 PTEs for those slots above KERNEL_BASE reference 89e1e4f5b1Sdrahn * kernel L2 tables. 
 *
 * The basic layout of the virtual address space thus looks like this:
 *
 *	0xffffffff
 *	.
 *	.
 *	.
 *	KERNEL_BASE
 *	--------------------
 *	.
 *	.
 *	.
 *	0x00000000
 */

/*
 * The number of L2 descriptor tables which can be tracked by an l2_dtable.
 * A bucket size of 16 provides for 16MB of contiguous virtual address
 * space per l2_dtable. Most processes will, therefore, require only two or
 * three of these to map their whole working set.
 */
#define	L2_BUCKET_LOG2	4
#define	L2_BUCKET_SIZE	(1 << L2_BUCKET_LOG2)

/*
 * Given the above "L2-descriptors-per-l2_dtable" constant, the number
 * of l2_dtable structures required to track all possible page descriptors
 * mappable by an L1 translation table is given by the following constants:
 */
#define	L2_LOG2		((32 - L1_S_SHIFT) - L2_BUCKET_LOG2)
#define	L2_SIZE		(1 << L2_LOG2)

#ifndef _LOCORE

struct l1_ttable;
struct l2_dtable;

/*
 * Track cache/tlb occupancy using the following structure.
 * The overlaid u_int32_t (cs_all) lets the whole state be tested or
 * assigned in one operation; the byte/halfword views break it down
 * into cache vs. TLB and I (index 0) vs. D (index 1) halves.
 */
union pmap_cache_state {
	struct {
		union {
			u_int8_t csu_cache_b[2];
			u_int16_t csu_cache;
		} cs_cache_u;

		union {
			u_int8_t csu_tlb_b[2];
			u_int16_t csu_tlb;
		} cs_tlb_u;
	} cs_s;
	u_int32_t cs_all;
};
/* Shorthand accessors for the members of union pmap_cache_state. */
#define	cs_cache_id	cs_s.cs_cache_u.csu_cache_b[0]
#define	cs_cache_d	cs_s.cs_cache_u.csu_cache_b[1]
#define	cs_cache	cs_s.cs_cache_u.csu_cache
#define	cs_tlb_id	cs_s.cs_tlb_u.csu_tlb_b[0]
#define	cs_tlb_d	cs_s.cs_tlb_u.csu_tlb_b[1]
#define	cs_tlb		cs_s.cs_tlb_u.csu_tlb

/*
 * Assigned to cs_all to force cacheops to work for a particular pmap
 */
#define	PMAP_CACHE_STATE_ALL	0xffffffffu

/*
 * The pmap structure itself
 */
struct pmap {
	u_int8_t		pm_domain;	/* MMU domain for this pmap's L1 slots */
	int			pm_remove_all;	/* NOTE(review): presumably set while the
						 * whole pmap is being torn down -- confirm
						 * against pmap.c */
	struct l1_ttable	*pm_l1;		/* L1 translation table */
	union pmap_cache_state	pm_cstate;	/* cache/TLB occupancy state */
	u_int			pm_refs;	/* reference count */
	struct l2_dtable	*pm_l2[L2_SIZE];	/* directory of L2 descriptor tables */
	struct pmap_statistics	pm_stats;	/* resident/wired counts (see macros below) */
};

typedef struct pmap *pmap_t;

/*
 * MD flags that we use for pmap_enter (in the pa):
 */
#define PMAP_PA_MASK	~((paddr_t)PAGE_MASK) /* to remove the flags */
#define PMAP_NOCACHE	0x1 /* non-cacheable memory. */
#define PMAP_DEVICE	0x2 /* device memory. */

/*
 * Physical / virtual address structure. In a number of places (particularly
 * during bootstrapping) we need to keep track of the physical and virtual
 * addresses of various pages
 */
typedef struct pv_addr {
	SLIST_ENTRY(pv_addr) pv_list;
	paddr_t pv_pa;		/* physical address of the page */
	vaddr_t pv_va;		/* virtual address of the page */
} pv_addr_t;

/*
 * Determine various modes for PTEs (user vs. kernel, cacheable
 * vs. non-cacheable).
 */
#define	PTE_KERNEL	0
#define	PTE_USER	1
#define	PTE_NOCACHE	0
#define	PTE_CACHE	1
#define	PTE_PAGETABLE	2

/*
 * Flags that indicate attributes of pages or mappings of pages.
 *
 * The PVF_MOD and PVF_REF flags are stored in the mdpage for each
 * page. PVF_WIRED and PVF_WRITE are kept in individual pv_entry's
 * for each page. They live in the same "namespace" so that we can
 * clear multiple attributes at a time.
 */
#define	PVF_MOD		0x01		/* page is modified */
#define	PVF_REF		0x02		/* page is referenced */
#define	PVF_WIRED	0x04		/* mapping is wired */
#define	PVF_WRITE	0x08		/* mapping is writable */
#define	PVF_EXEC	0x10		/* mapping is executable */

/*
 * Commonly referenced structures
 */
extern struct pmap	kernel_pmap_store;

/*
 * Macros that we need to export
 */
#define	pmap_kernel()			(&kernel_pmap_store)
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define	pmap_is_modified(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_MOD) != 0)
#define	pmap_is_referenced(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_REF) != 0)

/* These MI hooks need no work on this architecture. */
#define	pmap_deactivate(p)	do { /* nothing */ } while (0)

#define	pmap_init_percpu()		do { /* nothing */ } while (0)
#define	pmap_unuse_final(p)		do { /* nothing */ } while (0)
#define	pmap_remove_holes(vm)		do { /* nothing */ } while (0)

#define PMAP_CHECK_COPYIN	1

#define PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */

/* Functions we use internally.
 */
void	pmap_bootstrap(pd_entry_t *, vaddr_t, vaddr_t);

int	pmap_get_pde_pte(pmap_t, vaddr_t, pd_entry_t **, pt_entry_t **);
int	pmap_get_pde(pmap_t, vaddr_t, pd_entry_t **);
void	pmap_set_pcb_pagedir(pmap_t, struct pcb *);

void	pmap_postinit(void);

void	vector_page_setprot(int);

/* XXX */
void pmap_kenter_cache(vaddr_t va, paddr_t pa, vm_prot_t prot, int cacheable);

/* Bootstrapping routines. */
void	pmap_map_section(vaddr_t, vaddr_t, paddr_t, int, int);
void	pmap_map_entry(vaddr_t, vaddr_t, paddr_t, int, int);
vsize_t	pmap_map_chunk(vaddr_t, vaddr_t, paddr_t, vsize_t, int, int);
void	pmap_link_l2pt(vaddr_t, vaddr_t, pv_addr_t *);

/*
 * The current top of kernel VM
 */
extern vaddr_t	pmap_curmaxkvaddr;

/*
 * Useful macros and constants
 */

/*
 * Virtual address to page table entry: return a pointer to the kernel
 * PTE mapping `va', or NULL when no PDE/PTE pair exists for it.
 */
static __inline pt_entry_t *
vtopte(vaddr_t va)
{
	pd_entry_t *pdep;
	pt_entry_t *ptep;

	if (pmap_get_pde_pte(pmap_kernel(), va, &pdep, &ptep) == FALSE)
		return (NULL);
	return (ptep);
}

/*
 * Page tables are always mapped write-through.
 * Thus, on some platforms we can run fast and loose and avoid syncing PTEs
 * on every change.
 *
 * Unfortunately, not all CPUs have a write-through cache mode. So we
 * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs.
 */
extern int pmap_needs_pte_sync;

#define	PMAP_NEEDS_PTE_SYNC	pmap_needs_pte_sync

/*
 * PTE_SYNC: make a just-written PTE visible to the hardware table
 * walker.  The sequence (drain write buffer, write back the L1 dcache
 * line, write back the external L2 cache line when one is present,
 * drain again) is order-sensitive -- do not reorder.
 */
#define	PTE_SYNC(pte)							\
do {									\
	cpu_drain_writebuf();						\
	if (PMAP_NEEDS_PTE_SYNC) {					\
		paddr_t pa;						\
		cpu_dcache_wb_range((vaddr_t)(pte), sizeof(pt_entry_t));\
		if (cpu_sdcache_enabled()) {				\
			(void)pmap_extract(pmap_kernel(), (vaddr_t)(pte), &pa); \
			cpu_sdcache_wb_range((vaddr_t)(pte), (paddr_t)(pa), \
			    sizeof(pt_entry_t));			\
		};							\
		cpu_drain_writebuf();					\
	}								\
} while (/*CONSTCOND*/0)

/* PTE_SYNC for a run of `cnt' consecutive PTEs (same ordering rules). */
#define	PTE_SYNC_RANGE(pte, cnt)					\
do {									\
	cpu_drain_writebuf();						\
	if (PMAP_NEEDS_PTE_SYNC) {					\
		paddr_t pa;						\
		cpu_dcache_wb_range((vaddr_t)(pte),			\
		    (cnt) << 2); /* * sizeof(pt_entry_t) */		\
		if (cpu_sdcache_enabled()) {				\
			(void)pmap_extract(pmap_kernel(), (vaddr_t)(pte), &pa);\
			cpu_sdcache_wb_range((vaddr_t)(pte), (paddr_t)(pa), \
			    (cnt) << 2); /* * sizeof(pt_entry_t) */	\
		};							\
		cpu_drain_writebuf();					\
	}								\
} while (/*CONSTCOND*/0)

/* Tests on the type bits of an L1 descriptor. */
#define	l1pte_valid(pde)	(((pde) & L1_TYPE_MASK) != L1_TYPE_INV)
#define	l1pte_section_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_S)
#define	l1pte_page_p(pde)	(((pde) &
L1_TYPE_MASK) == L1_TYPE_C)
#define	l1pte_fpage_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_F)

/* L2 PTE helpers: table index for a va, validity test, frame extraction. */
#define l2pte_index(v)		(((v) & L2_ADDR_BITS) >> L2_S_SHIFT)
#define	l2pte_valid(pte)	(((pte) & L2_TYPE_MASK) != L2_TYPE_INV)
#define	l2pte_pa(pte)		((pte) & L2_S_FRAME)

/* L1 and L2 page table macros */
#define	pmap_pde_v(pde)		l1pte_valid(*(pde))
#define	pmap_pde_section(pde)	l1pte_section_p(*(pde))
#define	pmap_pde_page(pde)	l1pte_page_p(*(pde))
#define	pmap_pde_fpage(pde)	l1pte_fpage_p(*(pde))

/************************* ARM MMU configuration *****************************/

void	pmap_pte_init_armv7(void);

#endif /* !_LOCORE */

/*****************************************************************************/

/*
 * Definitions for MMU domains
 */
#define	PMAP_DOMAINS		15	/* 15 'user' domains (0-14) */
#define	PMAP_DOMAIN_KERNEL	15	/* The kernel uses domain #15 */

/*
 * These macros define the various bit masks in the PTE.
 *
 * We use these macros since we use different bits on different processor
 * models.
 *
 * Naming: UR/UW = user read-only / user read-write mappings,
 * KR/KW = kernel read-only / kernel read-write mappings
 * (per the AP_* encodings from <arm/pte.h>).
 */
#define	L1_S_PROT_UR_v7		(L1_S_V7_AP(AP_V7_KRUR))
#define	L1_S_PROT_UW_v7		(L1_S_V7_AP(AP_KRWURW))
#define	L1_S_PROT_KR_v7		(L1_S_V7_AP(AP_V7_KR))
#define	L1_S_PROT_KW_v7		(L1_S_V7_AP(AP_KRW))
#define	L1_S_PROT_MASK_v7	(L1_S_V7_AP(0x07))

#define	L1_S_CACHE_MASK_v7	(L1_S_B|L1_S_C|L1_S_V7_TEX_MASK)

#define	L1_S_COHERENT_v7	(L1_S_C)

#define	L2_L_PROT_UR_v7		(L2_V7_AP(AP_V7_KRUR))
#define	L2_L_PROT_UW_v7		(L2_V7_AP(AP_KRWURW))
#define	L2_L_PROT_KR_v7		(L2_V7_AP(AP_V7_KR))
#define	L2_L_PROT_KW_v7		(L2_V7_AP(AP_KRW))
#define	L2_L_PROT_MASK_v7	(L2_V7_AP(0x07) | L2_V7_L_XN)

#define	L2_L_CACHE_MASK_v7	(L2_B|L2_C|L2_V7_L_TEX_MASK)

#define	L2_L_COHERENT_v7	(L2_C)

#define	L2_S_PROT_UR_v7		(L2_V7_AP(AP_V7_KRUR))
#define	L2_S_PROT_UW_v7		(L2_V7_AP(AP_KRWURW))
#define	L2_S_PROT_KR_v7		(L2_V7_AP(AP_V7_KR))
#define	L2_S_PROT_KW_v7		(L2_V7_AP(AP_KRW))
#define	L2_S_PROT_MASK_v7	(L2_V7_AP(0x07) | L2_V7_S_XN)

#define	L2_S_CACHE_MASK_v7	(L2_B|L2_C|L2_V7_S_TEX_MASK)

#define	L2_S_COHERENT_v7	(L2_C)

/* Type bits identifying each descriptor flavour. */
#define	L1_S_PROTO_v7		(L1_TYPE_S)

#define	L1_C_PROTO_v7		(L1_TYPE_C)

#define	L2_L_PROTO		(L2_TYPE_L)

#define	L2_S_PROTO_v7		(L2_TYPE_S)

/* Only ARMv7 is supported, so the generic names map to the _v7 set. */
#define	L1_S_PROT_UR		L1_S_PROT_UR_v7
#define	L1_S_PROT_UW		L1_S_PROT_UW_v7
#define	L1_S_PROT_KR		L1_S_PROT_KR_v7
4001308cfa5Sjsg #define L1_S_PROT_KW L1_S_PROT_KW_v7 4011308cfa5Sjsg #define L1_S_PROT_MASK L1_S_PROT_MASK_v7 4021308cfa5Sjsg 403db316029Sjsg #define L2_L_PROT_UR L2_L_PROT_UR_v7 404db316029Sjsg #define L2_L_PROT_UW L2_L_PROT_UW_v7 405db316029Sjsg #define L2_L_PROT_KR L2_L_PROT_KR_v7 406db316029Sjsg #define L2_L_PROT_KW L2_L_PROT_KW_v7 407db316029Sjsg #define L2_L_PROT_MASK L2_L_PROT_MASK_v7 408db316029Sjsg 409db9ae5f9Spatrick #define L2_S_PROT_UR L2_S_PROT_UR_v7 410db9ae5f9Spatrick #define L2_S_PROT_UW L2_S_PROT_UW_v7 411db9ae5f9Spatrick #define L2_S_PROT_KR L2_S_PROT_KR_v7 412db9ae5f9Spatrick #define L2_S_PROT_KW L2_S_PROT_KW_v7 4130b0e92f9Sdrahn #define L2_S_PROT_MASK L2_S_PROT_MASK_v7 4140b0e92f9Sdrahn 4150b0e92f9Sdrahn #define L1_S_CACHE_MASK L1_S_CACHE_MASK_v7 4160b0e92f9Sdrahn #define L2_L_CACHE_MASK L2_L_CACHE_MASK_v7 4170b0e92f9Sdrahn #define L2_S_CACHE_MASK L2_S_CACHE_MASK_v7 4180b0e92f9Sdrahn 4190703b508Spatrick #define L1_S_COHERENT L1_S_COHERENT_v7 4200703b508Spatrick #define L2_L_COHERENT L2_L_COHERENT_v7 4210703b508Spatrick #define L2_S_COHERENT L2_S_COHERENT_v7 4220703b508Spatrick 4230b0e92f9Sdrahn #define L1_S_PROTO L1_S_PROTO_v7 4240b0e92f9Sdrahn #define L1_C_PROTO L1_C_PROTO_v7 4250b0e92f9Sdrahn #define L2_S_PROTO L2_S_PROTO_v7 4260b0e92f9Sdrahn 427e1e4f5b1Sdrahn /* 428e1e4f5b1Sdrahn * These macros return various bits based on kernel/user and protection. 429e1e4f5b1Sdrahn * Note that the compiler will usually fold these at compile time. 430e1e4f5b1Sdrahn */ 431db9ae5f9Spatrick #ifndef _LOCORE 432db9ae5f9Spatrick static __inline pt_entry_t 4331308cfa5Sjsg L1_S_PROT(int ku, vm_prot_t pr) 4341308cfa5Sjsg { 4351308cfa5Sjsg pt_entry_t pte; 4361308cfa5Sjsg 4371308cfa5Sjsg if (ku == PTE_USER) 4381e8cdc2eSderaadt pte = (pr & PROT_WRITE) ? L1_S_PROT_UW : L1_S_PROT_UR; 4391308cfa5Sjsg else 4401e8cdc2eSderaadt pte = (pr & PROT_WRITE) ? 
L1_S_PROT_KW : L1_S_PROT_KR; 44175618eb8Skettenis 4421e8cdc2eSderaadt if ((pr & PROT_EXEC) == 0) 4431308cfa5Sjsg pte |= L1_S_V7_XN; 4441308cfa5Sjsg 4451308cfa5Sjsg return pte; 4461308cfa5Sjsg } 4471308cfa5Sjsg static __inline pt_entry_t 448db9ae5f9Spatrick L2_L_PROT(int ku, vm_prot_t pr) 449db9ae5f9Spatrick { 450db9ae5f9Spatrick pt_entry_t pte; 451e1e4f5b1Sdrahn 452db9ae5f9Spatrick if (ku == PTE_USER) 4531e8cdc2eSderaadt pte = (pr & PROT_WRITE) ? L2_L_PROT_UW : L2_L_PROT_UR; 454db9ae5f9Spatrick else 4551e8cdc2eSderaadt pte = (pr & PROT_WRITE) ? L2_L_PROT_KW : L2_L_PROT_KR; 45675618eb8Skettenis 4571e8cdc2eSderaadt if ((pr & PROT_EXEC) == 0) 458db9ae5f9Spatrick pte |= L2_V7_L_XN; 459db9ae5f9Spatrick 460db9ae5f9Spatrick return pte; 461db9ae5f9Spatrick } 462db9ae5f9Spatrick static __inline pt_entry_t 463db9ae5f9Spatrick L2_S_PROT(int ku, vm_prot_t pr) 464db9ae5f9Spatrick { 465db9ae5f9Spatrick pt_entry_t pte; 466db9ae5f9Spatrick 467db9ae5f9Spatrick if (ku == PTE_USER) 4681e8cdc2eSderaadt pte = (pr & PROT_WRITE) ? L2_S_PROT_UW : L2_S_PROT_UR; 469db9ae5f9Spatrick else 4701e8cdc2eSderaadt pte = (pr & PROT_WRITE) ? L2_S_PROT_KW : L2_S_PROT_KR; 47175618eb8Skettenis 4721e8cdc2eSderaadt if ((pr & PROT_EXEC) == 0) 473db9ae5f9Spatrick pte |= L2_V7_S_XN; 474db9ae5f9Spatrick 475db9ae5f9Spatrick return pte; 476db9ae5f9Spatrick } 477db9ae5f9Spatrick 47820b8e21aSmpi static __inline int 479db9ae5f9Spatrick l2pte_is_writeable(pt_entry_t pte, struct pmap *pm) 480db9ae5f9Spatrick { 4819cd732efSkettenis return (pte & L2_V7_AP(0x4)) == 0; 482db9ae5f9Spatrick } 483db9ae5f9Spatrick #endif 484e1e4f5b1Sdrahn 485e1e4f5b1Sdrahn /* 486e1e4f5b1Sdrahn * Macros to test if a mapping is mappable with an L1 Section mapping 487e1e4f5b1Sdrahn * or an L2 Large Page mapping. 
 */
/* Both va and pa must be section-aligned, and the chunk must span one. */
#define	L1_S_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE)

/* Both va and pa must be large-page-aligned, and the chunk must span one. */
#define	L2_L_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE)

#endif /* _KERNEL */

#ifndef _LOCORE
/*
 * pmap-specific data store in the vm_page structure.
 */
struct vm_page_md {
	struct pv_entry *pvh_list;		/* pv_entry list */
	int pvh_attrs;				/* page attributes (PVF_*) */
};

/* Reset a page's pmap metadata: no mappings, no attribute bits. */
#define	VM_MDPAGE_INIT(pg)						\
do {									\
	(pg)->mdpage.pvh_list = NULL;					\
	(pg)->mdpage.pvh_attrs = 0;					\
} while (/*CONSTCOND*/0)
#endif /* _LOCORE */

#endif	/* _ARM_PMAP_H_ */