/*	$NetBSD: pmap.h,v 1.41 2022/05/26 05:34:04 skrll Exp $	*/

/*	$OpenBSD: pmap.h,v 1.35 2007/12/14 18:32:23 deraadt Exp $	*/

/*
 * Copyright (c) 2002-2004 Michael Shalayeff
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Pmap header for hppa.
 */

#ifndef _HPPA_PMAP_H_
#define _HPPA_PMAP_H_

#ifdef _KERNEL_OPT
#include "opt_cputype.h"
#endif

#include <sys/rwlock.h>
#include <machine/pte.h>
#include <machine/cpufunc.h>

#include <uvm/uvm_pglist.h>
#include <uvm/uvm_object.h>
#ifdef _KERNEL

#define PMAP_NEED_PROCWR

struct pmap {
	struct uvm_object pm_obj;	/* object (lck by object lock) */
#define	pm_lock	pm_obj.vmobjlock
	krwlock_t pm_obj_lock;		/* lock for pm_obj */
	struct vm_page	*pm_ptphint;
	struct vm_page	*pm_pdir_pg;	/* vm_page for pdir */
	volatile uint32_t *pm_pdir;	/* page dir (read-only after create) */
	pa_space_t	pm_space;	/* space id (read-only after create) */
	u_int		pm_pid;		/* prot id (read-only after create) */

	struct pmap_statistics	pm_stats;
};

#define PVF_MOD		PTE_PROT(TLB_DIRTY)	/* pg/mp is modified */
#define PVF_REF		PTE_PROT(TLB_REFTRAP)	/* pg/mp (inv) is referenced */
#define PVF_WRITE	PTE_PROT(TLB_WRITE)	/* pg/mp is writable */
#define PVF_EXEC	PTE_PROT(TLB_EXECUTE)	/* pg/mp is executable */
#define PVF_UNCACHEABLE	PTE_PROT(TLB_UNCACHEABLE) /* pg/mp is uncacheable */

#define HPPA_MAX_PID	0xfffa
#define HPPA_SID_MAX	0x7ffd

#define PMAP_DIRECTMAP	0x10000000	/* kenter_pa */

/*
 * DON'T CHANGE THIS - this is assumed in lots of places.
 */
#define HPPA_SID_KERNEL	0
#define HPPA_PID_KERNEL	2

struct pv_entry {			/* locked by its list's pvh_lock */
	struct pv_entry	*pv_next;
	struct pmap	*pv_pmap;	/* the pmap */
	vaddr_t		pv_va;		/* the virtual address + flags */
#define	PV_VAMASK	(~(PAGE_SIZE - 1))
#define	PV_KENTER	__BIT(0)

	struct vm_page	*pv_ptp;	/* the vm_page of the PTP */
};
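
/*
 * Since pv_va is page aligned, its low bits carry the flags above.  A
 * sketch of the accessor idiom (illustration only, not code from this
 * file):
 *
 *	vaddr_t va = pv->pv_va & PV_VAMASK;
 *	bool kentered = (pv->pv_va & PV_KENTER) != 0;
 */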

extern int pmap_hptsize;
extern struct pdc_hwtlb pdc_hwtlb;

/*
 * pool quickmaps
 */
static inline vaddr_t
hppa_map_poolpage(paddr_t pa)
{
	return (vaddr_t)pa;
}

static inline paddr_t
hppa_unmap_poolpage(vaddr_t va)
{
	pdcache(HPPA_SID_KERNEL, va, PAGE_SIZE);

#if defined(HP8000_CPU) || defined(HP8200_CPU) || \
    defined(HP8500_CPU) || defined(HP8600_CPU)
	pdtlb(HPPA_SID_KERNEL, va);
#endif

	return (paddr_t)va;
}

#define PMAP_MAP_POOLPAGE(pa)	hppa_map_poolpage(pa)
#define PMAP_UNMAP_POOLPAGE(va)	hppa_unmap_poolpage(va)
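
/*
 * The conversions above are identities because the hppa kernel runs with
 * equivalent (VA == PA) mappings for its own memory; unmapping purges the
 * dcache (and, on the PA8x00 models listed above, the TLB) so no stale
 * lines outlive the page's reuse.  A usage sketch, not code from this
 * file:
 *
 *	vaddr_t va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
 *	memset((void *)va, 0, PAGE_SIZE);
 *	paddr_t pa = PMAP_UNMAP_POOLPAGE(va);
 */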

/*
 * According to the PA-RISC manuals, virtual aliases of the same physical
 * page may differ only in their high 12 bits, i.e. they must be congruent
 * modulo HPPA_PGALIAS; pmap_prefer() adjusts a mapping hint accordingly.
 */
#define PMAP_PREFER(o,h,s,td)	pmap_prefer((o), (h), (td))

static inline void
pmap_prefer(vaddr_t fo, vaddr_t *va, int td)
{
	vaddr_t newva;

	newva = (*va & HPPA_PGAMASK) | (fo & HPPA_PGAOFF);
	if (td) {
		if (newva > *va)
			newva -= HPPA_PGALIAS;
	} else {
		if (newva < *va)
			newva += HPPA_PGALIAS;
	}
	*va = newva;
}
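
/*
 * Worked example (assuming HPPA_PGALIAS == 0x00100000, which follows from
 * a 32-bit VA whose high 12 bits alone may vary): for a bottom-up search
 * (td == 0) with object offset fo = 0x00012345 and hint *va = 0x00480000,
 *
 *	newva = (0x00480000 & HPPA_PGAMASK) | (0x00012345 & HPPA_PGAOFF)
 *	      = 0x00412345		which is below the hint, so
 *	newva += HPPA_PGALIAS		giving 0x00512345
 *
 * the lowest correctly-coloured address at or above the hint.
 */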

#define pmap_sid2pid(s)			(((s) + 1) << 1)
#define pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)
#define pmap_update(p)

#define pmap_copy(dpmap,spmap,da,len,sa)

#define pmap_clear_modify(pg)		pmap_changebit(pg, 0, PTE_PROT(TLB_DIRTY))
#define pmap_clear_reference(pg) \
    pmap_changebit(pg, PTE_PROT(TLB_REFTRAP), 0)
#define pmap_is_modified(pg)		pmap_testbit(pg, PTE_PROT(TLB_DIRTY))
#define pmap_is_referenced(pg)		pmap_testbit(pg, PTE_PROT(TLB_REFTRAP))
#define pmap_phys_address(ppn)		((ppn) << PAGE_SHIFT)
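
/*
 * Note that pmap_sid2pid() maps space id s to protection id 2 * (s + 1),
 * which keeps the reserved constants above consistent:
 *
 *	pmap_sid2pid(HPPA_SID_KERNEL) == HPPA_PID_KERNEL	(2 * 1 == 2)
 */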

void pmap_activate(struct lwp *);

void pmap_bootstrap(vaddr_t);
bool pmap_changebit(struct vm_page *, u_int, u_int);
bool pmap_testbit(struct vm_page *, u_int);
void pmap_write_protect(struct pmap *, vaddr_t, vaddr_t, vm_prot_t);
void pmap_remove(struct pmap *pmap, vaddr_t sva, vaddr_t eva);
void pmap_page_remove(struct vm_page *pg);

void pmap_procwr(struct proc *, vaddr_t, size_t);

static inline void
pmap_deactivate(struct lwp *l)
{
	/* Nothing. */
}

static inline bool
pmap_remove_all(struct pmap *pmap)
{
	/* Nothing. */
	return false;
}

static inline int
pmap_prot(struct pmap *pmap, int prot)
{
	extern u_int hppa_prot[];
	return (hppa_prot[prot] | (pmap == pmap_kernel() ? 0 : TLB_USER));
}

static inline void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	if ((prot & UVM_PROT_WRITE) == 0) {
		if (prot & (UVM_PROT_RX))
			pmap_changebit(pg, 0, PTE_PROT(TLB_WRITE));
		else
			pmap_page_remove(pg);
	}
}

static inline void
pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
	if ((prot & UVM_PROT_WRITE) == 0) {
		if (prot & (UVM_PROT_RX))
			pmap_write_protect(pmap, sva, eva, prot);
		else
			pmap_remove(pmap, sva, eva);
	}
}
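
/*
 * Both helpers only ever reduce permissions: dropping write access
 * downgrades the mappings in place, while dropping all access removes
 * them.  A sketch of the usual calls (illustration, not code from this
 * file):
 *
 *	pmap_page_protect(pg, UVM_PROT_READ);	clear TLB_WRITE everywhere
 *	pmap_page_protect(pg, UVM_PROT_NONE);	remove every mapping of pg
 */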

#define pmap_sid(pmap, va) \
	((((va) & 0xc0000000) != 0xc0000000) ? \
	    (pmap)->pm_space : HPPA_SID_KERNEL)
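
/*
 * i.e. any va in the top quadrant (both high bits set, va >= 0xc0000000)
 * resolves to the kernel space regardless of the pmap; for example
 * pmap_sid(pmap, 0xc0001000) == HPPA_SID_KERNEL for every pmap.
 */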

#define __HAVE_VM_PAGE_MD

struct pv_entry;

struct vm_page_md {
	struct pv_entry	*pvh_list;	/* head of list */
	u_int		pvh_attrs;	/* to preserve ref/mod */
};

#define VM_MDPAGE_INIT(pg) \
do { \
	(pg)->mdpage.pvh_list = NULL; \
	(pg)->mdpage.pvh_attrs = 0; \
} while (0)

#endif /* _KERNEL */

#endif /* _HPPA_PMAP_H_ */