/*	$NetBSD: pmap.h,v 1.28 2024/11/25 22:03:44 skrll Exp $	*/

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Ralph Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.h	8.1 (Berkeley) 6/10/93
 */

/*
 * Copyright (c) 1987 Carnegie-Mellon University
 *
 * This code is derived from software contributed to Berkeley by
 * Ralph Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.h	8.1 (Berkeley) 6/10/93
 */

#ifdef _KERNEL_OPT
#include "opt_efi.h"
#endif

#ifndef	_UVM_PMAP_PMAP_H_
#define	_UVM_PMAP_PMAP_H_

#include <sys/rwlock.h>
#include <uvm/uvm_object.h>
#include <uvm/uvm_pmap.h>
#include <uvm/uvm_stat.h>

#ifdef _KERNEL

#ifdef UVMHIST
UVMHIST_DECL(pmapexechist);
UVMHIST_DECL(pmaphist);
UVMHIST_DECL(pmapxtabhist);
#endif

/*
 * Alternate mapping hooks for pool pages.  Avoids thrashing the TLB.
 */
struct vm_page *pmap_md_alloc_poolpage(int);

#if !defined(KASAN)
vaddr_t pmap_map_poolpage(paddr_t);
paddr_t pmap_unmap_poolpage(vaddr_t);
#define	PMAP_ALLOC_POOLPAGE(flags)	pmap_md_alloc_poolpage(flags)
#define	PMAP_MAP_POOLPAGE(pa)		pmap_map_poolpage(pa)
#define	PMAP_UNMAP_POOLPAGE(va)		pmap_unmap_poolpage(va)

#if defined(_LP64)
#define PMAP_DIRECT
static __inline int
pmap_direct_process(paddr_t pa, voff_t pgoff, size_t len,
    int (*process)(void *, size_t, void *), void *arg)
{
	vaddr_t va = pmap_md_direct_map_paddr(pa);

	return process((void *)(va + pgoff), len, arg);
}
#endif
#endif
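
/*
 * Illustrative sketch (comment only, not compiled): with PMAP_DIRECT a
 * caller can let pmap_direct_process() run a callback on a physical
 * page through the direct map instead of entering a temporary mapping.
 * The callback name below is hypothetical.
 *
 *	static int
 *	zero_cb(void *kva, size_t len, void *arg)
 *	{
 *		memset(kva, 0, len);
 *		return 0;
 *	}
 *
 *	int error = pmap_direct_process(pa, 0, PAGE_SIZE, zero_cb, NULL);
 */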

#define PMAP_MAP_PDETABPAGE(pa)		pmap_md_map_poolpage(pa, PAGE_SIZE)
#define PMAP_MAP_SEGTABPAGE(pa)		pmap_md_map_poolpage(pa, PAGE_SIZE)
#define PMAP_MAP_PTEPAGE(pa)		pmap_md_map_poolpage(pa, PAGE_SIZE)

/*
 * The user address space is mapped using a two-level structure where
 * virtual address bits 31..22 are used to index into a segment table
 * which points to a page worth of PTEs (a 4096-byte page holds 1024
 * PTEs).  Bits 21..12 are then used to index a PTE, which describes a
 * page within a segment.
 */

#define pmap_trunc_seg(x)	((vaddr_t)(x) & ~SEGOFSET)
#define pmap_round_seg(x)	(((vaddr_t)(x) + SEGOFSET) & ~SEGOFSET)
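
/*
 * Illustrative sketch: with the 32-bit layout described above, a user
 * virtual address decomposes as
 *
 *	segment table index:	va >> 22		(bits 31..22)
 *	PTE index:		(va >> 12) & 0x3ff	(bits 21..12)
 *
 * and pmap_trunc_seg()/pmap_round_seg() round down/up to a segment
 * boundary, e.g.
 *
 *	pmap_trunc_seg(0x00403123) == 0x00400000
 *	pmap_round_seg(0x00403123) == 0x00800000
 *
 * The shift and mask values here follow from the comment above; the
 * real values come from the MD SEGOFSET/NPTEPG definitions.
 */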

/*
 * Each ptpage maps a "segment" worth of address space, that is
 * NPTEPG * PAGE_SIZE bytes (e.g. 4MB with 1024 PTEs and 4096-byte pages).
 */

#endif /* _KERNEL */

typedef struct {
	pt_entry_t ppg_ptes[NPTEPG];
} pmap_ptpage_t;

#if defined(PMAP_HWPAGEWALKER)
typedef union pmap_pdetab {
	pd_entry_t		pde_pde[PMAP_PDETABSIZE];
	union pmap_pdetab *	pde_next;
} pmap_pdetab_t;
#endif
#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
typedef union pmap_segtab {
#ifdef _LP64
	union pmap_segtab *	seg_seg[PMAP_SEGTABSIZE];
#endif
	pmap_ptpage_t *		seg_ppg[PMAP_SEGTABSIZE];
#ifdef PMAP_HWPAGEWALKER
	pd_entry_t		seg_pde[PMAP_PDETABSIZE];
#endif
	union pmap_segtab *	seg_next;
} pmap_segtab_t;
#endif
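
/*
 * Illustrative sketch of the software walk these types describe,
 * ignoring the _LP64 and PMAP_HWPAGEWALKER variants (SEGSHIFT and
 * PAGE_SHIFT are used here only for illustration, and pm_segtab is the
 * struct pmap member defined below; the real accessor is
 * pmap_pte_lookup()):
 *
 *	pmap_segtab_t *stb = pm->pm_segtab;
 *	pmap_ptpage_t *ppg = stb->seg_ppg[va >> SEGSHIFT];
 *	pt_entry_t pte = ppg->ppg_ptes[(va & SEGOFSET) >> PAGE_SHIFT];
 */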


#ifdef _KERNEL
struct pmap;
typedef bool (*pte_callback_t)(struct pmap *, vaddr_t, vaddr_t,
	pt_entry_t *, uintptr_t);

/*
 * Common part of bootstrapping the system enough to run with
 * virtual memory.
 */
void pmap_bootstrap_common(void);

pt_entry_t *pmap_pte_lookup(struct pmap *, vaddr_t);
pt_entry_t *pmap_pte_reserve(struct pmap *, vaddr_t, int);
void pmap_pte_process(struct pmap *, vaddr_t, vaddr_t, pte_callback_t,
	uintptr_t);
void pmap_segtab_activate(struct pmap *, struct lwp *);
void pmap_segtab_deactivate(struct pmap *);
void pmap_segtab_init(struct pmap *);
void pmap_segtab_destroy(struct pmap *, pte_callback_t, uintptr_t);
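
/*
 * Illustrative sketch (the callback name is hypothetical; the exact
 * argument and return-value semantics are those of the callers in
 * pmap.c and the implementation in pmap_segtab.c): pmap_pte_process()
 * hands ranges of PTEs between sva and eva to a pte_callback_t:
 *
 *	static bool
 *	example_pte_cb(struct pmap *pm, vaddr_t sva, vaddr_t eva,
 *	    pt_entry_t *ptep, uintptr_t flags)
 *	{
 *		for (; sva < eva; sva += PAGE_SIZE, ptep++) {
 *			... examine or update *ptep ...
 *		}
 *		return false;
 *	}
 *
 *	pmap_pte_process(pm, sva, eva, example_pte_cb, 0);
 */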
#ifdef PMAP_HWPAGEWALKER
pd_entry_t *pmap_pde_lookup(struct pmap *, vaddr_t, paddr_t *);
bool pmap_pdetab_fixup(struct pmap *, vaddr_t);
#endif
extern kmutex_t pmap_segtab_lock;
#endif /* _KERNEL */

#ifdef MULTIPROCESSOR
#include <sys/kcpuset.h>
#endif
#include <uvm/pmap/pmap_tlb.h>

/*
 * Machine dependent pmap structure.
 */
struct pmap {
	struct uvm_object	pm_uobject;
#define pm_refcnt		pm_uobject.uo_refs /* pmap reference count */
#define pm_pvp_list		pm_uobject.memq

	krwlock_t		pm_obj_lock;	/* lock for pm_uobject */
#define pm_lock pm_uobject.vmobjlock

	struct pglist		pm_ppg_list;
#if defined(PMAP_HWPAGEWALKER)
	struct pglist		pm_pdetab_list;
#endif
#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
	struct pglist		pm_segtab_list;
#endif
#ifdef MULTIPROCESSOR
	kcpuset_t		*pm_active;	/* pmap was active on ... */
	kcpuset_t		*pm_onproc;	/* pmap is active on ... */
	volatile u_int		pm_shootdown_pending;
#endif
#if defined(PMAP_HWPAGEWALKER)
	pmap_pdetab_t *		pm_pdetab;	/* pointer to HW PDEs */
#endif
#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
	pmap_segtab_t *		pm_segtab;	/* pointers to pages of PTEs; or */
						/* virtual shadow of HW PDEs */
#endif
	u_int			pm_flags;
#define	PMAP_DEFERRED_ACTIVATE	__BIT(0)
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	vaddr_t			pm_minaddr;
	vaddr_t			pm_maxaddr;
#ifdef __HAVE_PMAP_MD
	struct pmap_md		pm_md;
#endif
	struct pmap_asid_info	pm_pai[1];
};


#ifdef	_KERNEL
static inline void
pmap_lock(struct pmap *pm)
{

	rw_enter(pm->pm_lock, RW_WRITER);
}

static inline void
pmap_unlock(struct pmap *pm)
{

	rw_exit(pm->pm_lock);
}
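
/*
 * Illustrative sketch: pm_lock is taken as a writer around updates to
 * a pmap's structures, e.g.
 *
 *	pmap_lock(pm);
 *	... manipulate the pmap's page tables ...
 *	pmap_unlock(pm);
 */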

struct pmap_kernel {
	struct pmap kernel_pmap;
#if defined(MULTIPROCESSOR) && PMAP_TLB_MAX > 1
	struct pmap_asid_info kernel_pai[PMAP_TLB_MAX-1];
#endif
};

struct pmap_limits {
	paddr_t avail_start;
	paddr_t avail_end;
	vaddr_t virtual_start;
	vaddr_t virtual_end;
};

/*
 * Full size of a struct pmap, including one pmap_asid_info per TLB
 * (struct pmap above declares pm_pai[] with a single element).
 */
#ifdef MULTIPROCESSOR
#define PMAP_SIZE	offsetof(struct pmap, pm_pai[PMAP_TLB_MAX])
#else
#define PMAP_SIZE	sizeof(struct pmap)
#endif
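
/*
 * Illustrative sketch: pmap allocations are sized with PMAP_SIZE so
 * that, on MULTIPROCESSOR kernels, the trailing pm_pai[] array gets
 * one pmap_asid_info per TLB; arguments other than the size below are
 * only indicative:
 *
 *	pool_init(&pmap_pmap_pool, PMAP_SIZE, 0, 0, 0, "pmappl",
 *	    &pool_allocator_nointr, IPL_NONE);
 */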

/*
 * The pools from which pmap structures and sub-structures are allocated.
 */
extern struct pool pmap_pmap_pool;
extern struct pool pmap_pv_pool;
extern struct pool_allocator pmap_pv_page_allocator;

extern struct pmap_kernel kernel_pmap_store;
extern struct pmap_limits pmap_limits;

extern u_int pmap_page_colormask;

/*
 * The current top of kernel VM
 */
extern vaddr_t pmap_curmaxkvaddr;

#if defined(PMAP_HWPAGEWALKER)
extern pmap_pdetab_t pmap_kern_pdetab;
#else
extern pmap_segtab_t pmap_kern_segtab;
#endif

#define	pmap_wired_count(pmap)	((pmap)->pm_stats.wired_count)
#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)

bool	pmap_remove_all(pmap_t);
void	pmap_set_modified(paddr_t);
bool	pmap_page_clear_attributes(struct vm_page_md *, u_long);
void	pmap_page_set_attributes(struct vm_page_md *, u_long);
void	pmap_pvlist_lock_init(size_t);
#ifdef PMAP_VIRTUAL_CACHE_ALIASES
void	pmap_page_cache(struct vm_page_md *, bool);
#endif

#if defined(__HAVE_PMAP_PV_TRACK) && !defined(PMAP_PV_TRACK_ONLY_STUBS)
void	pmap_pv_protect(paddr_t, vm_prot_t);
#endif

#define	PMAP_WB		0
#define	PMAP_WBINV	1
#define	PMAP_INV	2

kmutex_t *pmap_pvlist_lock_addr(struct vm_page_md *);

#define	PMAP_STEAL_MEMORY	/* enable pmap_steal_memory() */
#define	PMAP_GROWKERNEL		/* enable pmap_growkernel() */

#define PMAP_COUNT(name)	(pmap_evcnt_##name.ev_count++ + 0)
#define PMAP_COUNTER(name, desc) \
struct evcnt pmap_evcnt_##name = \
	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", desc); \
EVCNT_ATTACH_STATIC(pmap_evcnt_##name)
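
/*
 * Illustrative sketch (hypothetical counter name): a pmap .c file
 * declares a counter once at file scope and bumps it where the event
 * happens:
 *
 *	PMAP_COUNTER(examples, "illustrative events");
 *
 *	...
 *		PMAP_COUNT(examples);
 *	...
 */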


static inline pt_entry_t *
kvtopte(vaddr_t va)
{

	return pmap_pte_lookup(pmap_kernel(), va);
}
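
/*
 * Illustrative sketch: kvtopte() yields the kernel PTE for a mapped
 * kernel virtual address, e.g.
 *
 *	pt_entry_t pte = *kvtopte(va);
 */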

/* for ddb */
void pmap_db_pmap_print(struct pmap *, void (*)(const char *, ...) __printflike(1, 2));
void pmap_db_mdpg_print(struct vm_page *, void (*)(const char *, ...) __printflike(1, 2));

#if defined(EFI_RUNTIME)
struct pmap *
	pmap_efirt(void);

#define pmap_activate_efirt()	pmap_md_activate_efirt()
#define pmap_deactivate_efirt()	pmap_md_deactivate_efirt()

#endif

#endif	/* _KERNEL */
#endif	/* _UVM_PMAP_PMAP_H_ */