/*	$OpenBSD: pmap.h,v 1.51 2015/07/27 03:36:38 guenther Exp $	*/

/*
 * Copyright (c) 2002-2004 Michael Shalayeff
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

#include <uvm/uvm_object.h>
#include <sys/mutex.h>

#ifdef	_KERNEL
#include <machine/pte.h>

struct pmap {
	struct mutex	pm_mtx;
	struct uvm_object pm_obj;
	struct vm_page	*pm_ptphint;
	struct vm_page	*pm_pdir_pg;	/* vm_page for pdir */
	volatile u_int32_t *pm_pdir;	/* page dir (read-only after create) */
	pa_space_t	pm_space;	/* space id (read-only after create) */
	u_int		pm_pid;		/* prot id (read-only after create) */

	struct pmap_statistics	pm_stats;
};
typedef struct pmap *pmap_t;

#define	HPPA_MAX_PID	0xfffa
#define	HPPA_SID_MAX	0x7ffd
#define	HPPA_SID_KERNEL	0
#define	HPPA_PID_KERNEL	2

#define	KERNEL_ACCESS_ID	1
#define	KERNEL_TEXT_PROT	(TLB_AR_KRX | (KERNEL_ACCESS_ID << 1))
#define	KERNEL_DATA_PROT	(TLB_AR_KRW | (KERNEL_ACCESS_ID << 1))

struct pv_entry {			/* locked by its list's pvh_lock */
	struct pv_entry	*pv_next;
	struct pmap	*pv_pmap;	/* the pmap */
	vaddr_t		pv_va;		/* the virtual address */
	struct vm_page	*pv_ptp;	/* the vm_page of the PTP */
};

/* also match the hardware tlb walker definition */
struct vp_entry {
	u_int	vp_tag;
	u_int	vp_tlbprot;
	u_int	vp_tlbpage;
	u_int	vp_ptr;
};

extern void gateway_page(void);
extern struct pmap kernel_pmap_store;

#if defined(HP7100LC_CPU) || defined(HP7300LC_CPU)
extern int pmap_hptsize;
extern struct pdc_hwtlb pdc_hwtlb;
#endif

/*
 * pool quickmaps
 */
#define	pmap_map_direct(pg)	((vaddr_t)VM_PAGE_TO_PHYS(pg))
struct vm_page	*pmap_unmap_direct(vaddr_t);
#define	__HAVE_PMAP_DIRECT

/*
 * according to the parisc manual aliased va's should be
 * different by high 12 bits only.
 */
#define	PMAP_PREFER(o,h)	pmap_prefer(o, h)
static __inline__ vaddr_t
pmap_prefer(vaddr_t offs, vaddr_t hint)
{
	vaddr_t pmap_prefer_hint = (hint & HPPA_PGAMASK) | (offs & HPPA_PGAOFF);
	if (pmap_prefer_hint < hint)
		pmap_prefer_hint += HPPA_PGALIAS;
	return pmap_prefer_hint;
}

/* pmap prefer alignment */
#define	PMAP_PREFER_ALIGN()	(HPPA_PGALIAS)
/* pmap prefer offset within alignment */
#define	PMAP_PREFER_OFFSET(of)	((of) & HPPA_PGAOFF)

#define	pmap_sid2pid(s)		(((s) + 1) << 1)
#define	pmap_kernel()		(&kernel_pmap_store)
#define	pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
#define	pmap_update(pm)		(void)(pm)
#define	pmap_copy(dpmap,spmap,da,len,sa)

#define	pmap_clear_modify(pg)	pmap_changebit(pg, 0, PTE_PROT(TLB_DIRTY))
#define	pmap_clear_reference(pg) pmap_changebit(pg, PTE_PROT(TLB_REFTRAP), 0)
#define	pmap_is_modified(pg)	pmap_testbit(pg, PTE_PROT(TLB_DIRTY))
#define	pmap_is_referenced(pg)	pmap_testbit(pg, PTE_PROT(TLB_REFTRAP))

#define	pmap_unuse_final(p)		/* nothing */
#define	pmap_remove_holes(vm)		do { /* nothing */ } while (0)

void	pmap_bootstrap(vaddr_t);
boolean_t pmap_changebit(struct vm_page *, pt_entry_t, pt_entry_t);
boolean_t pmap_testbit(struct vm_page *, pt_entry_t);
void	pmap_write_protect(struct pmap *, vaddr_t, vaddr_t, vm_prot_t);
void	pmap_remove(struct pmap *pmap, vaddr_t sva, vaddr_t eva);
void	pmap_page_remove(struct vm_page *pg);

static __inline int
pmap_prot(struct pmap *pmap, int prot)
{
	extern u_int hppa_prot[];
	return (hppa_prot[prot] | (pmap == pmap_kernel()? 0 : TLB_USER));
}

static __inline void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	if ((prot & PROT_WRITE) == 0) {
		if (prot & (PROT_READ | PROT_EXEC))
			pmap_changebit(pg, 0, PTE_PROT(TLB_WRITE));
		else
			pmap_page_remove(pg);
	}
}

static __inline void
pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
	if ((prot & PROT_WRITE) == 0) {
		if (prot & (PROT_READ | PROT_EXEC))
			pmap_write_protect(pmap, sva, eva, prot);
		else
			pmap_remove(pmap, sva, eva);
	}
}

#endif /* _KERNEL */

#if !defined(_LOCORE)
struct pv_entry;
struct vm_page_md {
	struct mutex	pvh_mtx;
	struct pv_entry	*pvh_list;	/* head of list (locked by pvh_mtx) */
	u_int		pvh_attrs;	/* to preserve ref/mod */
};

#define	VM_MDPAGE_INIT(pg) do {				\
	mtx_init(&(pg)->mdpage.pvh_mtx, IPL_VM);	\
	(pg)->mdpage.pvh_list = NULL;			\
	(pg)->mdpage.pvh_attrs = 0;			\
} while (0)
#endif

#endif /* _MACHINE_PMAP_H_ */
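/*
 * Illustrative sketch only: a userland rehearsal of the cache-alias
 * arithmetic performed by PMAP_PREFER()/pmap_prefer() above.  The DEMO_*
 * constants are assumptions derived from the "high 12 bits" comment
 * (i.e. a 1 MB alias boundary); the authoritative HPPA_PGALIAS,
 * HPPA_PGAMASK and HPPA_PGAOFF definitions live in <machine/pte.h>.
 * Build it as a standalone program, not as part of this header.
 *
 *	#include <stdio.h>
 *
 *	// Assumed 1 MB alias boundary: aliases must agree in the low 20 bits.
 *	#define DEMO_PGALIAS	0x00100000UL
 *	#define DEMO_PGAOFF	(DEMO_PGALIAS - 1)	// offset within an alias block
 *	#define DEMO_PGAMASK	(~DEMO_PGAOFF)		// alias-block base
 *
 *	// Mirror of pmap_prefer(): keep offs' alias color, at or above hint.
 *	static unsigned long
 *	demo_prefer(unsigned long offs, unsigned long hint)
 *	{
 *		unsigned long va = (hint & DEMO_PGAMASK) | (offs & DEMO_PGAOFF);
 *
 *		if (va < hint)			// that color already passed in this block,
 *			va += DEMO_PGALIAS;	// so move up to the next alias block
 *		return va;
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		// place an offset with color 0x12000 at or above hint 0x40050000
 *		printf("0x%lx\n", demo_prefer(0x00012000UL, 0x40050000UL));
 *		// prints 0x40112000: same low 20 bits as the offset, >= the hint
 *		return 0;
 *	}
 */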