/* $NetBSD: pmap.h,v 1.99 2022/07/19 22:04:14 riastradh Exp $ */

/*-
 * Copyright (c) 1998, 1999, 2000, 2001, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center and by Chris G. Demetriou.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.h	8.1 (Berkeley) 6/10/93
 */

/*
 * Copyright (c) 1987 Carnegie-Mellon University
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.h	8.1 (Berkeley) 6/10/93
 */

#ifndef _PMAP_MACHINE_
#define	_PMAP_MACHINE_

#if defined(_KERNEL_OPT)
#include "opt_multiprocessor.h"
#endif

#include <sys/param.h>
#include <sys/types.h>

#include <sys/mutex.h>
#include <sys/queue.h>

#include <machine/pte.h>

/*
 * Machine-dependent virtual memory state.
 *
 * If we ever support processor numbers higher than 63, we'll have to
 * rethink the CPU mask.
 *
 * Note that the per-CPU ASN state (pmc_asn, pmc_asngen) lives in the
 * pm_percpu[] array allocated in pmap_create().  Its size is based on
 * the PCS count from the HWRPB, and it is indexed by processor ID
 * (from `whami').  Each entry is padded to COHERENCY_UNIT to avoid
 * false sharing.
 *
 * The kernel pmap is a special case: since the kernel uses only ASM
 * mappings and a reserved ASN to keep the TLB clean, we don't allocate
 * any ASN info for the kernel pmap at all.
 */

LIST_HEAD(pmap_pagelist, vm_page);
LIST_HEAD(pmap_pvlist, pv_entry);

struct pmap_percpu {
	unsigned int		pmc_asn;	/* address space number */
	unsigned int		pmc_pad0;
	unsigned long		pmc_asngen;	/* ASN generation number */
	unsigned int		pmc_needisync;	/* CPU needs isync */
	unsigned int		pmc_pad1;
	pt_entry_t		*pmc_lev1map;	/* level 1 map */
	unsigned long		pmc_padN[(COHERENCY_UNIT / 8) - 4];
};

struct pmap {	/* pmaps are aligned to COHERENCY_UNIT boundaries */
		/* pmaps are locked by hashed mutexes */
	unsigned long		pm_cpus;	/* [ 0] CPUs using pmap */
	struct pmap_statistics	pm_stats;	/* [ 8] statistics */
	unsigned int		pm_count;	/* [24] reference count */
	unsigned int		__pm_spare0;	/* [28] spare field */
	struct pmap_pagelist	pm_ptpages;	/* [32] list of PT pages */
	struct pmap_pvlist	pm_pvents;	/* [40] list of PV entries */
	TAILQ_ENTRY(pmap)	pm_list;	/* [48] list of all pmaps */
	/* -- COHERENCY_UNIT boundary -- */
	struct pmap_percpu	pm_percpu[];	/* [64] per-CPU data */
			/*	variable length		*/
};

#define	PMAP_SIZEOF(x)						\
	(ALIGN(offsetof(struct pmap, pm_percpu[(x)])))
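
/*
 * Illustrative sketch only (not the actual pmap_create() code): since
 * pm_percpu[] is a flexible array member, a pmap allocation is sized
 * with PMAP_SIZEOF() for the number of processors found at boot.
 */
#if 0
	/* kmem_zalloc() from <sys/kmem.h>; "npcs" is a hypothetical
	   name for the HWRPB PCS count. */
	struct pmap *pmap = kmem_zalloc(PMAP_SIZEOF(npcs), KM_SLEEP);
#endif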

#define	PMAP_ASN_KERNEL		0	/* kernel-reserved ASN */
#define	PMAP_ASN_FIRST_USER	1	/* first user ASN */
#define	PMAP_ASNGEN_INVALID	0	/* reserved (invalid) ASN generation */
#define	PMAP_ASNGEN_INITIAL	1	/* first valid generation */

/*
 * For each struct vm_page, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_table.
 */
typedef struct pv_entry {
	struct pv_entry	*pv_next;	/* next pv_entry on page list */
	LIST_ENTRY(pv_entry) pv_link;	/* link on owning pmap's list */
	struct pmap	*pv_pmap;	/* pmap where mapping lies */
	vaddr_t		pv_va;		/* virtual address for mapping */
	pt_entry_t	*pv_pte;	/* PTE that maps the VA */
} *pv_entry_t;
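
/*
 * Illustrative sketch: walking every mapping of a page via its PV list.
 * VM_MDPAGE_PVS() (defined below) recovers the list head from the
 * attribute-packed pvh_listx word; the locking the real pmap code
 * would need is omitted here.
 */
#if 0
	struct pv_entry *pv;

	for (pv = VM_MDPAGE_PVS(pg); pv != NULL; pv = pv->pv_next) {
		/* pv->pv_pmap and pv->pv_va identify one mapping of pg. */
	}
#endif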

/* attrs in pvh_listx */
#define	PGA_MODIFIED		0x01UL		/* modified */
#define	PGA_REFERENCED		0x02UL		/* referenced */
#define	PGA_ATTRS		(PGA_MODIFIED | PGA_REFERENCED)

/* pvh_usage */
#define	PGU_NORMAL		0		/* free or normal use */
#define	PGU_PVENT		1		/* PV entries */
#define	PGU_L1PT		2		/* level 1 page table */
#define	PGU_L2PT		3		/* level 2 page table */
#define	PGU_L3PT		4		/* level 3 page table */

#ifdef _KERNEL

#include <sys/atomic.h>

struct cpu_info;
struct trapframe;

void	pmap_init_cpu(struct cpu_info *);
#if defined(MULTIPROCESSOR)
void	pmap_tlb_shootdown_ipi(struct cpu_info *, struct trapframe *);
#endif /* MULTIPROCESSOR */

#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define	pmap_copy(dp, sp, da, l, sa)	/* nothing */
#define	pmap_update(pmap)		/* nothing (yet) */

#define	pmap_is_referenced(pg)					\
	(((pg)->mdpage.pvh_listx & PGA_REFERENCED) != 0)
#define	pmap_is_modified(pg)					\
	(((pg)->mdpage.pvh_listx & PGA_MODIFIED) != 0)

#define	PMAP_STEAL_MEMORY	/* enable pmap_steal_memory() */
#define	PMAP_GROWKERNEL		/* enable pmap_growkernel() */

#define	PMAP_DIRECT
#define	PMAP_DIRECT_MAP(pa)	ALPHA_PHYS_TO_K0SEG((pa))
#define	PMAP_DIRECT_UNMAP(va)	ALPHA_K0SEG_TO_PHYS((va))

static __inline int
pmap_direct_process(paddr_t pa, voff_t pgoff, size_t len,
    int (*process)(void *, size_t, void *), void *arg)
{
	vaddr_t va = PMAP_DIRECT_MAP(pa);

	return process((void *)(va + pgoff), len, arg);
}
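
/*
 * Illustrative sketch with a hypothetical callback: zeroing part of a
 * physical page through the direct map, without entering a mapping.
 */
#if 0
static int
zero_cb(void *va, size_t len, void *arg)
{
	memset(va, 0, len);	/* memset() from <lib/libkern/libkern.h> */
	return 0;
}

	/* ... */
	error = pmap_direct_process(pa, 0, PAGE_SIZE, zero_cb, NULL);
#endif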

/*
 * Alternate mapping hooks for pool pages.  Avoids thrashing the TLB.
 */
#define	PMAP_MAP_POOLPAGE(pa)		PMAP_DIRECT_MAP(pa)
#define	PMAP_UNMAP_POOLPAGE(va)		PMAP_DIRECT_UNMAP(va)

/*
 * Other hooks for the pool allocator.
 */
#define	POOL_VTOPHYS(va)		ALPHA_K0SEG_TO_PHYS((vaddr_t) (va))

bool	pmap_pageidlezero(paddr_t);
#define	PMAP_PAGEIDLEZERO(pa)	pmap_pageidlezero((pa))

paddr_t vtophys(vaddr_t);

/* Machine-specific functions. */
void	pmap_bootstrap(paddr_t, u_int, u_long);
int	pmap_emulate_reference(struct lwp *, vaddr_t, int, int);

#define	pmap_pte_pa(pte)	(PG_PFNUM(*(pte)) << PGSHIFT)
#define	pmap_pte_prot(pte)	(*(pte) & PG_PROT)
#define	pmap_pte_w(pte)		(*(pte) & PG_WIRED)
#define	pmap_pte_v(pte)		(*(pte) & PG_V)
#define	pmap_pte_pv(pte)	(*(pte) & PG_PVLIST)
#define	pmap_pte_asm(pte)	(*(pte) & PG_ASM)
#define	pmap_pte_exec(pte)	(*(pte) & PG_EXEC)

#define	pmap_pte_set_w(pte, v)					\
do {								\
	if (v)							\
		*(pte) |= PG_WIRED;				\
	else							\
		*(pte) &= ~PG_WIRED;				\
} while (0)

#define	pmap_pte_w_chg(pte, nw)	((nw) ^ pmap_pte_w(pte))

#define	pmap_pte_set_prot(pte, np)				\
do {								\
	*(pte) &= ~PG_PROT;					\
	*(pte) |= (np);						\
} while (0)

#define	pmap_pte_prot_chg(pte, np) ((np) ^ pmap_pte_prot(pte))
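
/*
 * Illustrative sketch: the *_chg macros report whether a PTE update
 * would actually change anything, letting callers skip the store and
 * the associated TLB work when it would not.
 */
#if 0
	if (pmap_pte_prot_chg(pte, np)) {
		pmap_pte_set_prot(pte, np);
		/* ...then invalidate the TLB entry for this VA... */
	}
#endif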

static __inline pt_entry_t *
pmap_lev1map(pmap_t pmap)
{
	if (__predict_false(pmap == pmap_kernel())) {
		return kernel_lev1map;
	}
	/*
	 * We're just reading a per-CPU field that's the same on
	 * all CPUs, so don't bother disabling preemption around
	 * this.
	 */
	return pmap->pm_percpu[cpu_number()].pmc_lev1map;
}

static __inline pt_entry_t *
pmap_l1pte(pt_entry_t *lev1map, vaddr_t v)
{
	KASSERT(lev1map != NULL);
	return &lev1map[l1pte_index(v)];
}

static __inline pt_entry_t *
pmap_l2pte(pt_entry_t *lev1map, vaddr_t v, pt_entry_t *l1pte)
{
	pt_entry_t *lev2map;

	if (l1pte == NULL) {
		l1pte = pmap_l1pte(lev1map, v);
		if (pmap_pte_v(l1pte) == 0)
			return NULL;
	}

	lev2map = (pt_entry_t *)ALPHA_PHYS_TO_K0SEG(pmap_pte_pa(l1pte));
	return &lev2map[l2pte_index(v)];
}

static __inline pt_entry_t *
pmap_l3pte(pt_entry_t *lev1map, vaddr_t v, pt_entry_t *l2pte)
{
	pt_entry_t *l1pte, *lev2map, *lev3map;

	if (l2pte == NULL) {
		l1pte = pmap_l1pte(lev1map, v);
		if (pmap_pte_v(l1pte) == 0)
			return NULL;

		lev2map = (pt_entry_t *)ALPHA_PHYS_TO_K0SEG(pmap_pte_pa(l1pte));
		l2pte = &lev2map[l2pte_index(v)];
		if (pmap_pte_v(l2pte) == 0)
			return NULL;
	}

	lev3map = (pt_entry_t *)ALPHA_PHYS_TO_K0SEG(pmap_pte_pa(l2pte));
	return &lev3map[l3pte_index(v)];
}
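
/*
 * Illustrative sketch (locking omitted): a full VA -> PA translation
 * using the three-level walk helpers above.
 */
#if 0
	pt_entry_t * const lev1map = pmap_lev1map(pmap);
	pt_entry_t * const l3pte = pmap_l3pte(lev1map, va, NULL);
	paddr_t pa;

	if (l3pte != NULL && pmap_pte_v(l3pte))
		pa = pmap_pte_pa(l3pte) | (va & PAGE_MASK);
#endif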

/*
 * Macro for processing deferred I-stream synchronization.
 *
 * The pmap module may defer syncing the user I-stream until the
 * return to userspace, since the IMB PALcode op can be quite
 * expensive.  Since user instructions won't be executed until
 * the return to userspace, this can be deferred until userret().
 */
#define	PMAP_USERRET(pmap)					\
do {								\
	const unsigned long cpu_id = cpu_number();		\
								\
	if ((pmap)->pm_percpu[cpu_id].pmc_needisync) {		\
		(pmap)->pm_percpu[cpu_id].pmc_needisync = 0;	\
		alpha_pal_imb();				\
	}							\
} while (0)
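
/*
 * Illustrative sketch of a hypothetical call site: the machine-dependent
 * userret() path would run something like this on the way back to
 * user mode, after the pmap module has flagged a deferred isync via
 * pmc_needisync.
 */
#if 0
	struct lwp *l = curlwp;

	PMAP_USERRET(vm_map_pmap(&l->l_proc->p_vmspace->vm_map));
#endif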

/*
 * pmap-specific data stored in the vm_page structure.
 */
#define	__HAVE_VM_PAGE_MD
struct vm_page_md {
	uintptr_t pvh_listx;		/* pv_entry list + attrs */
	/*
	 * XXX These fields are only needed for pages that are used
	 * as PT pages.  It would be nice to find safely-unused fields
	 * in the vm_page structure that could be used instead.
	 *
	 * (Only 11 bits are needed ... we need to be able to count from
	 * 0-1025 ... 1025 because sometimes we need to take an extra
	 * reference temporarily in pmap_enter().)
	 */
	unsigned int pvh_physpgrefs;	/* # refs as a PT page */
	unsigned int pvh_spare0;	/* XXX spare field */
};

/* Reference counting for page table pages. */
#define	PHYSPAGE_REFCNT(pg)					\
	atomic_load_relaxed(&(pg)->mdpage.pvh_physpgrefs)
#define	PHYSPAGE_REFCNT_SET(pg, v)				\
	atomic_store_relaxed(&(pg)->mdpage.pvh_physpgrefs, (v))
#define	PHYSPAGE_REFCNT_INC(pg)					\
	atomic_inc_uint_nv(&(pg)->mdpage.pvh_physpgrefs)
#define	PHYSPAGE_REFCNT_DEC(pg)					\
	atomic_dec_uint_nv(&(pg)->mdpage.pvh_physpgrefs)
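
/*
 * Illustrative sketch: atomic_dec_uint_nv() returns the new value, so
 * the last-reference case can be detected without a separate load.
 */
#if 0
	if (PHYSPAGE_REFCNT_DEC(pg) == 0) {
		/* last PTE on this PT page went away; free the page */
	}
#endif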

#define	VM_MDPAGE_PVS(pg)					\
	((struct pv_entry *)((pg)->mdpage.pvh_listx & ~3UL))

#define	VM_MDPAGE_INIT(pg)					\
do {								\
	(pg)->mdpage.pvh_listx = 0UL;				\
} while (/*CONSTCOND*/0)

#endif /* _KERNEL */

#endif /* _PMAP_MACHINE_ */