/*	$NetBSD: pmap.c,v 1.80 2024/05/06 07:18:19 skrll Exp $	*/

/*-
 * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center and by Chris G. Demetriou.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and Ralph Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.c	8.4 (Berkeley) 1/26/94
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.80 2024/05/06 07:18:19 skrll Exp $");

/*
 *	Manages physical address maps.
 *
 *	In addition to hardware address maps, this
 *	module is called upon to provide software-use-only
 *	maps which may or may not be stored in the same
 *	form as hardware maps.  These pseudo-maps are
 *	used to store intermediate results from copy
 *	operations to and from address spaces.
 *
 *	Since the information managed by this module is
 *	also stored by the logical address mapping module,
 *	this module may throw away valid virtual-to-physical
 *	mappings at almost any time.  However, invalidations
 *	of virtual-to-physical mappings must be done as
 *	requested.
 *
 *	In order to cope with hardware architectures which
 *	make virtual-to-physical map invalidates expensive,
 *	this module may delay invalidate or reduced protection
 *	operations until such time as they are actually
 *	necessary.  This module is given full information as
 *	to which processors are currently using which maps,
 *	and to when physical maps must be made correct.
 */
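
/*
 * Editorial sketch (not compiled): the reference-counting contract of the
 * pmap lifetime functions defined below.  pmap_create() returns a map with
 * pm_refcnt of 1; pmap_reference() and pmap_destroy() adjust the count, and
 * the pmap is only torn down when the count reaches zero.
 */
#if 0	/* example only */
static void
pmap_refcount_example(void)
{
	pmap_t pm = pmap_create();	/* pm_refcnt == 1 */

	pmap_reference(pm);		/* pm_refcnt == 2 */
	pmap_destroy(pm);		/* pm_refcnt == 1; pmap survives */
	pmap_destroy(pm);		/* pm_refcnt == 0; pmap is freed */
}
#endif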

#include "opt_ddb.h"
#include "opt_efi.h"
#include "opt_modular.h"
#include "opt_multiprocessor.h"
#include "opt_sysv.h"
#include "opt_uvmhist.h"

#define __PMAP_PRIVATE

#include <sys/param.h>

#include <sys/asan.h>
#include <sys/atomic.h>
#include <sys/buf.h>
#include <sys/cpu.h>
#include <sys/mutex.h>
#include <sys/pool.h>

#include <uvm/uvm.h>
#include <uvm/uvm_physseg.h>
#include <uvm/pmap/pmap_pvt.h>

#if defined(MULTIPROCESSOR) && defined(PMAP_VIRTUAL_CACHE_ALIASES) \
    && !defined(PMAP_NO_PV_UNCACHED)
#error PMAP_VIRTUAL_CACHE_ALIASES with MULTIPROCESSOR requires \
 PMAP_NO_PV_UNCACHED to be defined
#endif

#if defined(PMAP_PV_TRACK_ONLY_STUBS)
#undef	__HAVE_PMAP_PV_TRACK
#endif

PMAP_COUNTER(remove_kernel_calls, "remove kernel calls");
PMAP_COUNTER(remove_kernel_pages, "kernel pages unmapped");
PMAP_COUNTER(remove_user_calls, "remove user calls");
PMAP_COUNTER(remove_user_pages, "user pages unmapped");
PMAP_COUNTER(remove_flushes, "remove cache flushes");
PMAP_COUNTER(remove_tlb_ops, "remove tlb ops");
PMAP_COUNTER(remove_pvfirst, "remove pv first");
PMAP_COUNTER(remove_pvsearch, "remove pv search");

PMAP_COUNTER(prefer_requests, "prefer requests");
PMAP_COUNTER(prefer_adjustments, "prefer adjustments");

PMAP_COUNTER(idlezeroed_pages, "pages idle zeroed");

PMAP_COUNTER(kenter_pa, "kernel fast mapped pages");
PMAP_COUNTER(kenter_pa_bad, "kernel fast mapped pages (bad color)");
PMAP_COUNTER(kenter_pa_unmanaged, "kernel fast mapped unmanaged pages");
PMAP_COUNTER(kremove_pages, "kernel fast unmapped pages");

PMAP_COUNTER(page_cache_evictions, "pages changed to uncacheable");
PMAP_COUNTER(page_cache_restorations, "pages changed to cacheable");

PMAP_COUNTER(kernel_mappings_bad, "kernel pages mapped (bad color)");
PMAP_COUNTER(user_mappings_bad, "user pages mapped (bad color)");
PMAP_COUNTER(kernel_mappings, "kernel pages mapped");
PMAP_COUNTER(user_mappings, "user pages mapped");
PMAP_COUNTER(user_mappings_changed, "user mapping changed");
PMAP_COUNTER(kernel_mappings_changed, "kernel mapping changed");
PMAP_COUNTER(uncached_mappings, "uncached pages mapped");
PMAP_COUNTER(unmanaged_mappings, "unmanaged pages mapped");
PMAP_COUNTER(pvtracked_mappings, "pv-tracked unmanaged pages mapped");
PMAP_COUNTER(efirt_mappings, "EFI RT pages mapped");
PMAP_COUNTER(managed_mappings, "managed pages mapped");
PMAP_COUNTER(mappings, "pages mapped");
PMAP_COUNTER(remappings, "pages remapped");
PMAP_COUNTER(unmappings, "pages unmapped");
PMAP_COUNTER(primary_mappings, "page initial mappings");
PMAP_COUNTER(primary_unmappings, "page final unmappings");
PMAP_COUNTER(tlb_hit, "page mapping");

PMAP_COUNTER(exec_mappings, "exec pages mapped");
PMAP_COUNTER(exec_synced_mappings, "exec pages synced");
PMAP_COUNTER(exec_synced_remove, "exec pages synced (PR)");
PMAP_COUNTER(exec_synced_clear_modify, "exec pages synced (CM)");
PMAP_COUNTER(exec_synced_page_protect, "exec pages synced (PP)");
PMAP_COUNTER(exec_synced_protect, "exec pages synced (P)");
PMAP_COUNTER(exec_uncached_page_protect, "exec pages uncached (PP)");
PMAP_COUNTER(exec_uncached_clear_modify, "exec pages uncached (CM)");
PMAP_COUNTER(exec_uncached_zero_page, "exec pages uncached (ZP)");
PMAP_COUNTER(exec_uncached_copy_page, "exec pages uncached (CP)");
PMAP_COUNTER(exec_uncached_remove, "exec pages uncached (PR)");

PMAP_COUNTER(create, "creates");
PMAP_COUNTER(reference, "references");
PMAP_COUNTER(dereference, "dereferences");
PMAP_COUNTER(destroy, "destroyed");
PMAP_COUNTER(activate, "activations");
PMAP_COUNTER(deactivate, "deactivations");
PMAP_COUNTER(update, "updates");
#ifdef MULTIPROCESSOR
PMAP_COUNTER(shootdown_ipis, "shootdown IPIs");
#endif
PMAP_COUNTER(unwire, "unwires");
PMAP_COUNTER(copy, "copies");
PMAP_COUNTER(clear_modify, "clear_modifies");
PMAP_COUNTER(protect, "protects");
PMAP_COUNTER(page_protect, "page_protects");

#define PMAP_ASID_RESERVED 0
CTASSERT(PMAP_ASID_RESERVED == 0);

#ifdef PMAP_HWPAGEWALKER
#ifndef PMAP_PDETAB_ALIGN
#define PMAP_PDETAB_ALIGN	/* nothing */
#endif

#ifdef _LP64
pmap_pdetab_t	pmap_kstart_pdetab PMAP_PDETAB_ALIGN; /* first mid-level pdetab for kernel */
#endif
pmap_pdetab_t	pmap_kern_pdetab PMAP_PDETAB_ALIGN;
#endif

#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
#ifndef PMAP_SEGTAB_ALIGN
#define PMAP_SEGTAB_ALIGN	/* nothing */
#endif
#ifdef _LP64
pmap_segtab_t	pmap_kstart_segtab PMAP_SEGTAB_ALIGN; /* first mid-level segtab for kernel */
#endif
pmap_segtab_t	pmap_kern_segtab PMAP_SEGTAB_ALIGN = { /* top level segtab for kernel */
#ifdef _LP64
	.seg_seg[(VM_MIN_KERNEL_ADDRESS >> XSEGSHIFT) & (NSEGPG - 1)] = &pmap_kstart_segtab,
#endif
};
#endif

struct pmap_kernel kernel_pmap_store = {
	.kernel_pmap = {
		.pm_refcnt = 1,
#ifdef PMAP_HWPAGEWALKER
		.pm_pdetab = PMAP_INVALID_PDETAB_ADDRESS,
#endif
#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
		.pm_segtab = &pmap_kern_segtab,
#endif
		.pm_minaddr = VM_MIN_KERNEL_ADDRESS,
		.pm_maxaddr = VM_MAX_KERNEL_ADDRESS,
	},
};

struct pmap * const kernel_pmap_ptr = &kernel_pmap_store.kernel_pmap;

#if defined(EFI_RUNTIME)
static struct pmap efirt_pmap;

pmap_t
pmap_efirt(void)
{
	return &efirt_pmap;
}
#else
static inline pt_entry_t
pte_make_enter_efirt(paddr_t pa, vm_prot_t prot, u_int flags)
{
	panic("not supported");
}
#endif

/* The current top of kernel VM - gets updated by pmap_growkernel */
vaddr_t pmap_curmaxkvaddr;

struct pmap_limits pmap_limits = {	/* VA and PA limits */
	.virtual_start = VM_MIN_KERNEL_ADDRESS,
	.virtual_end = VM_MAX_KERNEL_ADDRESS,
};

#ifdef UVMHIST
static struct kern_history_ent pmapexechistbuf[10000];
static struct kern_history_ent pmaphistbuf[10000];
static struct kern_history_ent pmapxtabhistbuf[5000];
UVMHIST_DEFINE(pmapexechist) = UVMHIST_INITIALIZER(pmapexechist, pmapexechistbuf);
UVMHIST_DEFINE(pmaphist) = UVMHIST_INITIALIZER(pmaphist, pmaphistbuf);
UVMHIST_DEFINE(pmapxtabhist) = UVMHIST_INITIALIZER(pmapxtabhist, pmapxtabhistbuf);
#endif

/*
 * The pools from which pmap structures and sub-structures are allocated.
 */
struct pool pmap_pmap_pool;
struct pool pmap_pv_pool;

#ifndef PMAP_PV_LOWAT
#define	PMAP_PV_LOWAT	16
#endif
int	pmap_pv_lowat = PMAP_PV_LOWAT;

bool	pmap_initialized = false;
#define	PMAP_PAGE_COLOROK_P(a, b) \
		((((int)(a) ^ (int)(b)) & pmap_page_colormask) == 0)
u_int	pmap_page_colormask;
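
/*
 * Editorial illustration (not compiled) of PMAP_PAGE_COLOROK_P(): assuming
 * 4 KiB pages and four cache colors, pmap_page_colormask would be 0x3000,
 * so only bits 12-13 of the two addresses are compared.  The values are an
 * assumed configuration, not a statement about any particular port.
 */
#if 0	/* example only */
static void
pmap_colorok_example(void)
{
	/* with pmap_page_colormask == 0x3000 ... */
	KASSERT(PMAP_PAGE_COLOROK_P(0x5000, 0xd000));	/* same color */
	KASSERT(!PMAP_PAGE_COLOROK_P(0x5000, 0x6000));	/* colors differ */
}
#endif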
290b1425120Schristos 
2915528d7fdSmatt #define PAGE_IS_MANAGED(pa)	(pmap_initialized && uvm_pageismanaged(pa))
292b1425120Schristos 
293b1425120Schristos #define PMAP_IS_ACTIVE(pm)						\
294b1425120Schristos 	((pm) == pmap_kernel() ||					\
295b1425120Schristos 	 (pm) == curlwp->l_proc->p_vmspace->vm_map.pmap)
296b1425120Schristos 
297b1425120Schristos /* Forward function declarations */
29831d27c36Sskrll void pmap_page_remove(struct vm_page_md *);
2995528d7fdSmatt static void pmap_pvlist_check(struct vm_page_md *);
300b1425120Schristos void pmap_remove_pv(pmap_t, vaddr_t, struct vm_page *, bool);
30131d27c36Sskrll void pmap_enter_pv(pmap_t, vaddr_t, paddr_t, struct vm_page_md *, pt_entry_t *, u_int);
302b1425120Schristos 
303b1425120Schristos /*
304b1425120Schristos  * PV table management functions.
305b1425120Schristos  */
306b1425120Schristos void	*pmap_pv_page_alloc(struct pool *, int);
307b1425120Schristos void	pmap_pv_page_free(struct pool *, void *);
308b1425120Schristos 
309b1425120Schristos struct pool_allocator pmap_pv_page_allocator = {
310b1425120Schristos 	pmap_pv_page_alloc, pmap_pv_page_free, 0,
311b1425120Schristos };
312b1425120Schristos 
313b1425120Schristos #define	pmap_pv_alloc()		pool_get(&pmap_pv_pool, PR_NOWAIT)
314b1425120Schristos #define	pmap_pv_free(pv)	pool_put(&pmap_pv_pool, (pv))
315b1425120Schristos 
31629807ee5Sthorpej #ifndef PMAP_NEED_TLB_MISS_LOCK
31729807ee5Sthorpej 
31829807ee5Sthorpej #if defined(PMAP_MD_NEED_TLB_MISS_LOCK) || defined(DEBUG)
31929807ee5Sthorpej #define	PMAP_NEED_TLB_MISS_LOCK
32029807ee5Sthorpej #endif /* PMAP_MD_NEED_TLB_MISS_LOCK || DEBUG */
32129807ee5Sthorpej 
32229807ee5Sthorpej #endif /* PMAP_NEED_TLB_MISS_LOCK */
32329807ee5Sthorpej 
32429807ee5Sthorpej #ifdef PMAP_NEED_TLB_MISS_LOCK
32529807ee5Sthorpej 
32629807ee5Sthorpej #ifdef PMAP_MD_NEED_TLB_MISS_LOCK
32729807ee5Sthorpej #define	pmap_tlb_miss_lock_init()	__nothing /* MD code deals with this */
32829807ee5Sthorpej #define	pmap_tlb_miss_lock_enter()	pmap_md_tlb_miss_lock_enter()
32929807ee5Sthorpej #define	pmap_tlb_miss_lock_exit()	pmap_md_tlb_miss_lock_exit()
33029807ee5Sthorpej #else
3319d7b661eSthorpej kmutex_t pmap_tlb_miss_lock		__cacheline_aligned;
33229807ee5Sthorpej 
33329807ee5Sthorpej static void
pmap_tlb_miss_lock_init(void)33429807ee5Sthorpej pmap_tlb_miss_lock_init(void)
33529807ee5Sthorpej {
33629807ee5Sthorpej 	mutex_init(&pmap_tlb_miss_lock, MUTEX_SPIN, IPL_HIGH);
33729807ee5Sthorpej }
33829807ee5Sthorpej 
33929807ee5Sthorpej static inline void
pmap_tlb_miss_lock_enter(void)34029807ee5Sthorpej pmap_tlb_miss_lock_enter(void)
34129807ee5Sthorpej {
34229807ee5Sthorpej 	mutex_spin_enter(&pmap_tlb_miss_lock);
34329807ee5Sthorpej }
34429807ee5Sthorpej 
34529807ee5Sthorpej static inline void
pmap_tlb_miss_lock_exit(void)34629807ee5Sthorpej pmap_tlb_miss_lock_exit(void)
34729807ee5Sthorpej {
34829807ee5Sthorpej 	mutex_spin_exit(&pmap_tlb_miss_lock);
34929807ee5Sthorpej }
35029807ee5Sthorpej #endif /* PMAP_MD_NEED_TLB_MISS_LOCK */
35129807ee5Sthorpej 
35229807ee5Sthorpej #else
35329807ee5Sthorpej 
35429807ee5Sthorpej #define	pmap_tlb_miss_lock_init()	__nothing
35529807ee5Sthorpej #define	pmap_tlb_miss_lock_enter()	__nothing
35629807ee5Sthorpej #define	pmap_tlb_miss_lock_exit()	__nothing
35729807ee5Sthorpej 
35829807ee5Sthorpej #endif /* PMAP_NEED_TLB_MISS_LOCK */
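
/*
 * Condensed sketch (not compiled) of how the TLB miss lock is used
 * throughout this file (see pmap_pte_remove() below): it brackets each PTE
 * update together with its TLB invalidation so that a concurrent TLB miss
 * handler never observes the page table and the TLB out of sync.
 */
#if 0	/* example only */
	pmap_tlb_miss_lock_enter();
	pte_set(ptep, npte);			/* update the PTE ... */
	pmap_tlb_invalidate_addr(pmap, va);	/* ... then flush the TLB */
	pmap_tlb_miss_lock_exit();
#endif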

#ifndef MULTIPROCESSOR
kmutex_t pmap_pvlist_mutex	__cacheline_aligned;
#endif

/*
 * Debug functions.
 */

#ifdef DEBUG

bool pmap_stealdebug = false;

#define DPRINTF(...)							     \
    do { if (pmap_stealdebug) { printf(__VA_ARGS__); } } while (false)

static inline void
pmap_asid_check(pmap_t pm, const char *func)
{
	if (!PMAP_IS_ACTIVE(pm))
		return;

	struct pmap_asid_info * const pai = PMAP_PAI(pm, cpu_tlb_info(curcpu()));
	tlb_asid_t asid = tlb_get_asid();
	if (asid != pai->pai_asid)
		panic("%s: inconsistency for active TLB update: %u <-> %u",
		    func, asid, pai->pai_asid);
}
#else

#define DPRINTF(...) __nothing

#endif

static void
pmap_addr_range_check(pmap_t pmap, vaddr_t sva, vaddr_t eva, const char *func)
{
#ifdef DEBUG
	if (pmap == pmap_kernel()) {
		if (sva < VM_MIN_KERNEL_ADDRESS)
			panic("%s: kva %#"PRIxVADDR" not in range",
			    func, sva);
		if (eva >= pmap_limits.virtual_end)
			panic("%s: kva %#"PRIxVADDR" not in range",
			    func, eva);
	} else {
		if (eva > VM_MAXUSER_ADDRESS)
			panic("%s: uva %#"PRIxVADDR" not in range",
			    func, eva);
		pmap_asid_check(pmap, func);
	}
#endif
}

/*
 * Misc. functions.
 */

bool
pmap_page_clear_attributes(struct vm_page_md *mdpg, u_long clear_attributes)
{
	volatile u_long * const attrp = &mdpg->mdpg_attrs;

#ifdef MULTIPROCESSOR
	for (;;) {
		u_long old_attr = *attrp;
		if ((old_attr & clear_attributes) == 0)
			return false;
		u_long new_attr = old_attr & ~clear_attributes;
		if (old_attr == atomic_cas_ulong(attrp, old_attr, new_attr))
			return true;
	}
#else
	u_long old_attr = *attrp;
	if ((old_attr & clear_attributes) == 0)
		return false;
	*attrp &= ~clear_attributes;
	return true;
#endif
}

void
pmap_page_set_attributes(struct vm_page_md *mdpg, u_long set_attributes)
{
#ifdef MULTIPROCESSOR
	atomic_or_ulong(&mdpg->mdpg_attrs, set_attributes);
#else
	mdpg->mdpg_attrs |= set_attributes;
#endif
}
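
/*
 * Usage sketch (not compiled): pmap_page_clear_attributes() reports whether
 * any of the requested bits were actually set, which callers in this file
 * use to decide whether follow-up work (e.g. icache synchronization
 * accounting) is required.
 */
#if 0	/* example only */
	if (pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE)) {
		/* the page was marked executable and no longer is */
	}
#endif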

static void
pmap_page_syncicache(struct vm_page *pg)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLED(pmaphist);
#ifndef MULTIPROCESSOR
	struct pmap * const curpmap = curlwp->l_proc->p_vmspace->vm_map.pmap;
#endif
	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
	pv_entry_t pv = &mdpg->mdpg_first;
	kcpuset_t *onproc;
#ifdef MULTIPROCESSOR
	kcpuset_create(&onproc, true);
	KASSERT(onproc != NULL);
#else
	onproc = NULL;
#endif
	VM_PAGEMD_PVLIST_READLOCK(mdpg);
	pmap_pvlist_check(mdpg);

	UVMHIST_LOG(pmaphist, "pv %#jx pv_pmap %#jx", (uintptr_t)pv,
	    (uintptr_t)pv->pv_pmap, 0, 0);

	if (pv->pv_pmap != NULL) {
		for (; pv != NULL; pv = pv->pv_next) {
#ifdef MULTIPROCESSOR
			UVMHIST_LOG(pmaphist, "pv %#jx pv_pmap %#jx",
			    (uintptr_t)pv, (uintptr_t)pv->pv_pmap, 0, 0);
			kcpuset_merge(onproc, pv->pv_pmap->pm_onproc);
			if (kcpuset_match(onproc, kcpuset_running)) {
				break;
			}
#else
			if (pv->pv_pmap == curpmap) {
				onproc = curcpu()->ci_kcpuset;
				break;
			}
#endif
		}
	}
	pmap_pvlist_check(mdpg);
	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
	kpreempt_disable();
	pmap_md_page_syncicache(mdpg, onproc);
	kpreempt_enable();
#ifdef MULTIPROCESSOR
	kcpuset_destroy(onproc);
#endif
}

/*
 * Define the initial bounds of the kernel virtual address space.
 */
void
pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp)
{
	*vstartp = pmap_limits.virtual_start;
	*vendp = pmap_limits.virtual_end;
}

vaddr_t
pmap_growkernel(vaddr_t maxkvaddr)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmaphist, "maxkvaddr=%#jx (%#jx)", maxkvaddr,
	    pmap_curmaxkvaddr, 0, 0);

	vaddr_t virtual_end = pmap_curmaxkvaddr;
	maxkvaddr = pmap_round_seg(maxkvaddr) - 1;

	/*
	 * Don't exceed VM_MAX_KERNEL_ADDRESS!
	 */
	if (maxkvaddr == 0 || maxkvaddr > VM_MAX_KERNEL_ADDRESS)
		maxkvaddr = VM_MAX_KERNEL_ADDRESS;

	/*
	 * Reserve PTEs for the new KVA space.
	 */
	for (; virtual_end < maxkvaddr; virtual_end += NBSEG) {
		pmap_pte_reserve(pmap_kernel(), virtual_end, 0);
	}

	kasan_shadow_map((void *)pmap_curmaxkvaddr,
	    (size_t)(virtual_end - pmap_curmaxkvaddr));

	/*
	 * Update new end.
	 */
	pmap_curmaxkvaddr = virtual_end;

	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);

	return virtual_end;
}

/*
 * Bootstrap memory allocator (alternative to vm_bootstrap_steal_memory()).
 * This function allows for early dynamic memory allocation until the virtual
 * memory system has been bootstrapped.  After that point, either kmem_alloc
 * or malloc should be used.  This function works by stealing pages from the
 * (to be) managed page pool, then implicitly mapping the pages (by using
 * their direct mapped addresses) and zeroing them.
 *
 * It may be used once the physical memory segments have been pre-loaded
 * into the vm_physmem[] array.  Early memory allocation MUST use this
 * interface!  This cannot be used after vm_page_startup(), and will
 * generate a panic if tried.
 *
 * Note that this memory will never be freed, and in essence it is wired
 * down.
 *
 * We must adjust *vstartp and/or *vendp iff we use address space
 * from the kernel virtual address range defined by pmap_virtual_space().
 */
vaddr_t
pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp)
{
	size_t npgs;
	paddr_t pa;
	vaddr_t va;

	uvm_physseg_t maybe_bank = UVM_PHYSSEG_TYPE_INVALID;

	size = round_page(size);
	npgs = atop(size);

	DPRINTF("%s: need %zu pages\n", __func__, npgs);

	for (uvm_physseg_t bank = uvm_physseg_get_first();
	     uvm_physseg_valid_p(bank);
	     bank = uvm_physseg_get_next(bank)) {

		if (uvm.page_init_done == true)
			panic("pmap_steal_memory: called _after_ bootstrap");

		DPRINTF("%s: seg %"PRIxPHYSSEG": %#"PRIxPADDR" %#"PRIxPADDR" %#"PRIxPADDR" %#"PRIxPADDR"\n",
		    __func__, bank,
		    uvm_physseg_get_avail_start(bank), uvm_physseg_get_start(bank),
		    uvm_physseg_get_avail_end(bank), uvm_physseg_get_end(bank));

		if (uvm_physseg_get_avail_start(bank) != uvm_physseg_get_start(bank)
		    || uvm_physseg_get_avail_start(bank) >= uvm_physseg_get_avail_end(bank)) {
			DPRINTF("%s: seg %"PRIxPHYSSEG": bad start\n", __func__, bank);
			continue;
		}

		if (uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank) < npgs) {
			DPRINTF("%s: seg %"PRIxPHYSSEG": too small for %zu pages\n",
			    __func__, bank, npgs);
			continue;
		}

		if (!pmap_md_ok_to_steal_p(bank, npgs)) {
			continue;
		}

		/*
		 * Always try to allocate from the segment with the least
		 * amount of space left.
		 */
#define VM_PHYSMEM_SPACE(b)	((uvm_physseg_get_avail_end(b)) - (uvm_physseg_get_avail_start(b)))
		if (uvm_physseg_valid_p(maybe_bank) == false
		    || VM_PHYSMEM_SPACE(bank) < VM_PHYSMEM_SPACE(maybe_bank)) {
			maybe_bank = bank;
		}
	}

	if (uvm_physseg_valid_p(maybe_bank)) {
		const uvm_physseg_t bank = maybe_bank;

		/*
		 * There are enough pages here; steal them!
		 */
		pa = ptoa(uvm_physseg_get_start(bank));
		uvm_physseg_unplug(atop(pa), npgs);

		DPRINTF("%s: seg %"PRIxPHYSSEG": %zu pages stolen (%#"PRIxPADDR" left)\n",
		    __func__, bank, npgs, VM_PHYSMEM_SPACE(bank));

		va = pmap_md_map_poolpage(pa, size);
		memset((void *)va, 0, size);
		return va;
	}

	/*
	 * If we got here, there was no memory left.
	 */
	panic("pmap_steal_memory: no memory to steal %zu pages", npgs);
}
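
/*
 * Usage sketch (not compiled), assuming an MD bootstrap that needs early
 * wired memory before uvm_init(): the returned VA is direct-mapped and
 * zero-filled, and is never returned to the system.  This implementation
 * never adjusts *vstartp/*vendp, so NULL is acceptable for both here.
 */
#if 0	/* example only */
	/* allocate four wired, zeroed pages for a hypothetical MD table */
	vaddr_t tab = pmap_steal_memory(4 * PAGE_SIZE, NULL, NULL);
#endif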

/*
 *	Bootstrap the system enough to run with virtual memory.
 *	(Common routine called by machine-dependent bootstrap code.)
 */
void
pmap_bootstrap_common(void)
{
	UVMHIST_LINK_STATIC(pmapexechist);
	UVMHIST_LINK_STATIC(pmaphist);
	UVMHIST_LINK_STATIC(pmapxtabhist);

	static const struct uvm_pagerops pmap_pager = {
		/* nothing */
	};

	pmap_t pm = pmap_kernel();

	rw_init(&pm->pm_obj_lock);
	uvm_obj_init(&pm->pm_uobject, &pmap_pager, false, 1);
	uvm_obj_setlock(&pm->pm_uobject, &pm->pm_obj_lock);

	TAILQ_INIT(&pm->pm_ppg_list);

#if defined(PMAP_HWPAGEWALKER)
	TAILQ_INIT(&pm->pm_pdetab_list);
#endif
#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
	TAILQ_INIT(&pm->pm_segtab_list);
#endif

#if defined(EFI_RUNTIME)

	const pmap_t efipm = pmap_efirt();
	struct pmap_asid_info * const efipai = PMAP_PAI(efipm, cpu_tlb_info(ci));

	rw_init(&efipm->pm_obj_lock);
	uvm_obj_init(&efipm->pm_uobject, &pmap_pager, false, 1);
	uvm_obj_setlock(&efipm->pm_uobject, &efipm->pm_obj_lock);

	efipai->pai_asid = KERNEL_PID;

	TAILQ_INIT(&efipm->pm_ppg_list);

#if defined(PMAP_HWPAGEWALKER)
	TAILQ_INIT(&efipm->pm_pdetab_list);
#endif
#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
	TAILQ_INIT(&efipm->pm_segtab_list);
#endif

#endif

	/*
	 * Initialize the segtab lock.
	 */
	mutex_init(&pmap_segtab_lock, MUTEX_DEFAULT, IPL_HIGH);

	pmap_tlb_miss_lock_init();
}

/*
 *	Initialize the pmap module.
 *	Called by vm_init, to initialize any structures that the pmap
 *	system needs to map virtual memory.
 */
void
pmap_init(void)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLED(pmaphist);

	/*
	 * Set a low water mark on the pv_entry pool, so that we are
	 * more likely to have these around even in extreme memory
	 * starvation.
	 */
	pool_setlowat(&pmap_pv_pool, pmap_pv_lowat);

	/*
	 * Set the page colormask but allow pmap_md_init to override it.
	 */
	pmap_page_colormask = ptoa(uvmexp.colormask);

	pmap_md_init();

	/*
	 * Now it is safe to enable pv entry recording.
	 */
	pmap_initialized = true;
}

/*
 *	Create and return a physical map.
 *
 *	If the size specified for the map
 *	is zero, the map is an actual physical
 *	map, and may be referenced by the
 *	hardware.
 *
 *	If the size specified is non-zero,
 *	the map will be used in software only, and
 *	is bounded by that size.
 */
pmap_t
pmap_create(void)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLED(pmaphist);
	PMAP_COUNT(create);

	static const struct uvm_pagerops pmap_pager = {
		/* nothing */
	};

	pmap_t pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
	memset(pmap, 0, PMAP_SIZE);

	KASSERT(pmap->pm_pai[0].pai_link.le_prev == NULL);

	pmap->pm_refcnt = 1;
	pmap->pm_minaddr = VM_MIN_ADDRESS;
	pmap->pm_maxaddr = VM_MAXUSER_ADDRESS;

	rw_init(&pmap->pm_obj_lock);
	uvm_obj_init(&pmap->pm_uobject, &pmap_pager, false, 1);
	uvm_obj_setlock(&pmap->pm_uobject, &pmap->pm_obj_lock);

	TAILQ_INIT(&pmap->pm_ppg_list);
#if defined(PMAP_HWPAGEWALKER)
	TAILQ_INIT(&pmap->pm_pdetab_list);
#endif
#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
	TAILQ_INIT(&pmap->pm_segtab_list);
#endif

	pmap_segtab_init(pmap);

#ifdef MULTIPROCESSOR
	kcpuset_create(&pmap->pm_active, true);
	kcpuset_create(&pmap->pm_onproc, true);
	KASSERT(pmap->pm_active != NULL);
	KASSERT(pmap->pm_onproc != NULL);
#endif

	UVMHIST_LOG(pmaphist, " <-- done (pmap=%#jx)", (uintptr_t)pmap,
	    0, 0, 0);

	return pmap;
}

/*
 *	Retire the given physical map from service.
 *	Should only be called if the map contains
 *	no valid mappings.
 */
void
pmap_destroy(pmap_t pmap)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx)", (uintptr_t)pmap, 0, 0, 0);
	UVMHIST_CALLARGS(pmapxtabhist, "(pmap=%#jx)", (uintptr_t)pmap, 0, 0, 0);

	membar_release();
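	/*
	 * The membar_release() above pairs with the membar_acquire() below:
	 * together they order every prior store to the pmap before the
	 * final-reference teardown done by whichever thread drops
	 * pm_refcnt to zero.
	 */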
	if (atomic_dec_uint_nv(&pmap->pm_refcnt) > 0) {
		PMAP_COUNT(dereference);
		UVMHIST_LOG(pmaphist, " <-- done (deref)", 0, 0, 0, 0);
		UVMHIST_LOG(pmapxtabhist, " <-- done (deref)", 0, 0, 0, 0);
		return;
	}
	membar_acquire();

	PMAP_COUNT(destroy);
	KASSERT(pmap->pm_refcnt == 0);
	kpreempt_disable();
	pmap_tlb_miss_lock_enter();
	pmap_tlb_asid_release_all(pmap);
	pmap_tlb_miss_lock_exit();
	pmap_segtab_destroy(pmap, NULL, 0);

	KASSERT(TAILQ_EMPTY(&pmap->pm_ppg_list));

#ifdef _LP64
#if defined(PMAP_HWPAGEWALKER)
	KASSERT(TAILQ_EMPTY(&pmap->pm_pdetab_list));
#endif
#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
	KASSERT(TAILQ_EMPTY(&pmap->pm_segtab_list));
#endif
#endif
	KASSERT(pmap->pm_uobject.uo_npages == 0);

	uvm_obj_destroy(&pmap->pm_uobject, false);
	rw_destroy(&pmap->pm_obj_lock);

#ifdef MULTIPROCESSOR
	kcpuset_destroy(pmap->pm_active);
	kcpuset_destroy(pmap->pm_onproc);
	pmap->pm_active = NULL;
	pmap->pm_onproc = NULL;
#endif

	pool_put(&pmap_pmap_pool, pmap);
	kpreempt_enable();

	UVMHIST_LOG(pmaphist, " <-- done (freed)", 0, 0, 0, 0);
	UVMHIST_LOG(pmapxtabhist, " <-- done (freed)", 0, 0, 0, 0);
}

/*
 *	Add a reference to the specified pmap.
 */
void
pmap_reference(pmap_t pmap)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx)", (uintptr_t)pmap, 0, 0, 0);
	PMAP_COUNT(reference);

	if (pmap != NULL) {
		atomic_inc_uint(&pmap->pm_refcnt);
	}

	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
}

/*
 *	Make a new pmap (vmspace) active for the given process.
 */
void
pmap_activate(struct lwp *l)
{
	pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmaphist, "(l=%#jx pmap=%#jx)", (uintptr_t)l,
	    (uintptr_t)pmap, 0, 0);
	PMAP_COUNT(activate);

	kpreempt_disable();
	pmap_tlb_miss_lock_enter();
	pmap_tlb_asid_acquire(pmap, l);
	pmap_segtab_activate(pmap, l);
	pmap_tlb_miss_lock_exit();
	kpreempt_enable();

	UVMHIST_LOG(pmaphist, " <-- done (%ju:%ju)", l->l_proc->p_pid,
	    l->l_lid, 0, 0);
}

/*
 * Remove this page from all physical maps in which it resides.
 * Reflects back modify bits to the pager.
 */
void
pmap_page_remove(struct vm_page_md *mdpg)
{
	kpreempt_disable();
	VM_PAGEMD_PVLIST_LOCK(mdpg);
	pmap_pvlist_check(mdpg);

	struct vm_page * const pg =
	    VM_PAGEMD_VMPAGE_P(mdpg) ? VM_MD_TO_PAGE(mdpg) : NULL;

	UVMHIST_FUNC(__func__);
	if (pg) {
		UVMHIST_CALLARGS(pmaphist, "mdpg %#jx pg %#jx (pa %#jx): "
		    "execpage cleared", (uintptr_t)mdpg, (uintptr_t)pg,
		    VM_PAGE_TO_PHYS(pg), 0);
	} else {
		UVMHIST_CALLARGS(pmaphist, "mdpg %#jx", (uintptr_t)mdpg, 0,
		    0, 0);
	}

#ifdef PMAP_VIRTUAL_CACHE_ALIASES
	pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE | VM_PAGEMD_UNCACHED);
#else
	pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
#endif
	PMAP_COUNT(exec_uncached_remove);

	pv_entry_t pv = &mdpg->mdpg_first;
	if (pv->pv_pmap == NULL) {
		VM_PAGEMD_PVLIST_UNLOCK(mdpg);
		kpreempt_enable();
		UVMHIST_LOG(pmaphist, " <-- done (empty)", 0, 0, 0, 0);
		return;
	}

	pv_entry_t npv;
	pv_entry_t pvp = NULL;

	for (; pv != NULL; pv = npv) {
		npv = pv->pv_next;
#ifdef PMAP_VIRTUAL_CACHE_ALIASES
		if (PV_ISKENTER_P(pv)) {
			UVMHIST_LOG(pmaphist, " pv %#jx pmap %#jx va %#jx"
			    " skip", (uintptr_t)pv, (uintptr_t)pv->pv_pmap,
			    pv->pv_va, 0);

			KASSERT(pv->pv_pmap == pmap_kernel());

			/* Assume no more - it'll get fixed if there are */
			pv->pv_next = NULL;

			/*
			 * pvp is non-null when we already have a PV_KENTER
			 * pv in pvh_first; otherwise we haven't seen a
			 * PV_KENTER pv and we need to copy this one to
			 * pvh_first
			 */
			if (pvp) {
				/*
				 * The previous PV_KENTER pv needs to point to
				 * this PV_KENTER pv
				 */
				pvp->pv_next = pv;
			} else {
				pv_entry_t fpv = &mdpg->mdpg_first;
				*fpv = *pv;
				KASSERT(fpv->pv_pmap == pmap_kernel());
			}
			pvp = pv;
			continue;
		}
#endif
		const pmap_t pmap = pv->pv_pmap;
		vaddr_t va = trunc_page(pv->pv_va);
		pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
		KASSERTMSG(ptep != NULL, "%#"PRIxVADDR " %#"PRIxVADDR, va,
		    pmap_limits.virtual_end);
		pt_entry_t pte = *ptep;
		UVMHIST_LOG(pmaphist, " pv %#jx pmap %#jx va %#jx"
		    " pte %#jx", (uintptr_t)pv, (uintptr_t)pmap, va,
		    pte_value(pte));
		if (!pte_valid_p(pte))
			continue;
		const bool is_kernel_pmap_p = (pmap == pmap_kernel());
		if (is_kernel_pmap_p) {
			PMAP_COUNT(remove_kernel_pages);
		} else {
			PMAP_COUNT(remove_user_pages);
		}
		if (pte_wired_p(pte))
			pmap->pm_stats.wired_count--;
		pmap->pm_stats.resident_count--;

		pmap_tlb_miss_lock_enter();
		const pt_entry_t npte = pte_nv_entry(is_kernel_pmap_p);
		pte_set(ptep, npte);
		if (__predict_true(!(pmap->pm_flags & PMAP_DEFERRED_ACTIVATE))) {
			/*
			 * Flush the TLB for the given address.
			 */
			pmap_tlb_invalidate_addr(pmap, va);
		}
		pmap_tlb_miss_lock_exit();

		/*
		 * non-null means this is a non-pvh_first pv, so we should
		 * free it.
		 */
		if (pvp) {
			KASSERT(pvp->pv_pmap == pmap_kernel());
			KASSERT(pvp->pv_next == NULL);
			pmap_pv_free(pv);
		} else {
			pv->pv_pmap = NULL;
			pv->pv_next = NULL;
		}
	}

	pmap_pvlist_check(mdpg);
	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
	kpreempt_enable();

	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
}

#ifdef __HAVE_PMAP_PV_TRACK
/*
 * pmap_pv_protect: change protection of an unmanaged pv-tracked page from
 * all pmaps that map it
 */
void
pmap_pv_protect(paddr_t pa, vm_prot_t prot)
{

	/* the only case is remove at the moment */
	KASSERT(prot == VM_PROT_NONE);
	struct pmap_page *pp;

	pp = pmap_pv_tracked(pa);
	if (pp == NULL)
		panic("pmap_pv_protect: page not pv-tracked: 0x%"PRIxPADDR,
		    pa);

	struct vm_page_md *mdpg = PMAP_PAGE_TO_MD(pp);
	pmap_page_remove(mdpg);
}
#endif

/*
 *	Make a previously active pmap (vmspace) inactive.
 */
void
pmap_deactivate(struct lwp *l)
{
	pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmaphist, "(l=%#jx pmap=%#jx)", (uintptr_t)l,
	    (uintptr_t)pmap, 0, 0);
	PMAP_COUNT(deactivate);

	kpreempt_disable();
	KASSERT(l == curlwp || l->l_cpu == curlwp->l_cpu);
	pmap_tlb_miss_lock_enter();
	pmap_tlb_asid_deactivate(pmap);
	pmap_segtab_deactivate(pmap);
	pmap_tlb_miss_lock_exit();
	kpreempt_enable();

	UVMHIST_LOG(pmaphist, " <-- done (%ju:%ju)", l->l_proc->p_pid,
	    l->l_lid, 0, 0);
}

void
pmap_update(struct pmap *pmap)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx)", (uintptr_t)pmap, 0, 0, 0);
	PMAP_COUNT(update);

	kpreempt_disable();
#if defined(MULTIPROCESSOR) && defined(PMAP_TLB_NEED_SHOOTDOWN)
	u_int pending = atomic_swap_uint(&pmap->pm_shootdown_pending, 0);
	if (pending && pmap_tlb_shootdown_bystanders(pmap))
		PMAP_COUNT(shootdown_ipis);
#endif
	pmap_tlb_miss_lock_enter();
#if defined(DEBUG) && !defined(MULTIPROCESSOR)
	pmap_tlb_check(pmap, pmap_md_tlb_check_entry);
#endif /* DEBUG */

	/*
	 * If pmap_remove_all was called, we deactivated ourselves and nuked
	 * our ASID.  Now we have to reactivate ourselves.
	 */
	if (__predict_false(pmap->pm_flags & PMAP_DEFERRED_ACTIVATE)) {
		pmap->pm_flags ^= PMAP_DEFERRED_ACTIVATE;
		pmap_tlb_asid_acquire(pmap, curlwp);
		pmap_segtab_activate(pmap, curlwp);
	}
	pmap_tlb_miss_lock_exit();
	kpreempt_enable();

	UVMHIST_LOG(pmaphist, " <-- done (kernel=%jd)",
		    (pmap == pmap_kernel() ? 1 : 0), 0, 0, 0);
}
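
/*
 * Sketch (not compiled) of the MI batching contract implemented above:
 * callers may issue several map-changing operations and need the result to
 * be globally visible only once pmap_update() returns.  The range bounds
 * here are illustrative placeholders.
 */
#if 0	/* example only */
	pmap_remove(pmap, sva, eva);	/* queue up removals ... */
	pmap_update(pmap);		/* ... then flush pending shootdowns
					   and reactivate if deferred */
#endif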
1100b1425120Schristos 
1101b1425120Schristos /*
1102b1425120Schristos  *	Remove the given range of addresses from the specified map.
1103b1425120Schristos  *
1104b1425120Schristos  *	It is assumed that the start and end are properly
1105b1425120Schristos  *	rounded to the page size.
1106b1425120Schristos  */
1107b1425120Schristos 
1108b1425120Schristos static bool
pmap_pte_remove(pmap_t pmap,vaddr_t sva,vaddr_t eva,pt_entry_t * ptep,uintptr_t flags)1109b1425120Schristos pmap_pte_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep,
1110b1425120Schristos     uintptr_t flags)
1111b1425120Schristos {
1112b1425120Schristos 	const pt_entry_t npte = flags;
1113b1425120Schristos 	const bool is_kernel_pmap_p = (pmap == pmap_kernel());
1114b1425120Schristos 
1115e4535b97Sskrll 	UVMHIST_FUNC(__func__);
11165256ea23Sskrll 	UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx kernel=%jd va=%#jx..%#jx)",
11172b807ab8Sskrll 	    (uintptr_t)pmap, (is_kernel_pmap_p ? 1 : 0), sva, eva);
1118e4535b97Sskrll 	UVMHIST_LOG(pmaphist, "ptep=%#jx, flags(npte)=%#jx)",
1119cb32a134Spgoyette 	    (uintptr_t)ptep, flags, 0, 0);
1120b1425120Schristos 
1121b1425120Schristos 	KASSERT(kpreempt_disabled());
1122b1425120Schristos 
1123b1425120Schristos 	for (; sva < eva; sva += NBPG, ptep++) {
11245528d7fdSmatt 		const pt_entry_t pte = *ptep;
11255528d7fdSmatt 		if (!pte_valid_p(pte))
1126b1425120Schristos 			continue;
11275528d7fdSmatt 		if (is_kernel_pmap_p) {
11285528d7fdSmatt 			PMAP_COUNT(remove_kernel_pages);
11295528d7fdSmatt 		} else {
1130b1425120Schristos 			PMAP_COUNT(remove_user_pages);
11315528d7fdSmatt 		}
11325528d7fdSmatt 		if (pte_wired_p(pte))
1133b1425120Schristos 			pmap->pm_stats.wired_count--;
1134b1425120Schristos 		pmap->pm_stats.resident_count--;
11355528d7fdSmatt 		struct vm_page * const pg = PHYS_TO_VM_PAGE(pte_to_paddr(pte));
1136b1425120Schristos 		if (__predict_true(pg != NULL)) {
11375528d7fdSmatt 			pmap_remove_pv(pmap, sva, pg, pte_modified_p(pte));
1138b1425120Schristos 		}
113929807ee5Sthorpej 		pmap_tlb_miss_lock_enter();
1140e90fc54cSskrll 		pte_set(ptep, npte);
114167f41800Sskrll 		if (__predict_true(!(pmap->pm_flags & PMAP_DEFERRED_ACTIVATE))) {
1142b1425120Schristos 			/*
1143b1425120Schristos 			 * Flush the TLB for the given address.
1144b1425120Schristos 			 */
1145b1425120Schristos 			pmap_tlb_invalidate_addr(pmap, sva);
114667f41800Sskrll 		}
114729807ee5Sthorpej 		pmap_tlb_miss_lock_exit();
1148b1425120Schristos 	}
11495528d7fdSmatt 
11505528d7fdSmatt 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
11515528d7fdSmatt 
1152b1425120Schristos 	return false;
1153b1425120Schristos }
1154b1425120Schristos 
1155b1425120Schristos void
pmap_remove(pmap_t pmap,vaddr_t sva,vaddr_t eva)1156b1425120Schristos pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
1157b1425120Schristos {
1158b1425120Schristos 	const bool is_kernel_pmap_p = (pmap == pmap_kernel());
1159b1425120Schristos 	const pt_entry_t npte = pte_nv_entry(is_kernel_pmap_p);
1160b1425120Schristos 
1161e4535b97Sskrll 	UVMHIST_FUNC(__func__);
1162e4535b97Sskrll 	UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx, va=%#jx..%#jx)",
1163cb32a134Spgoyette 	    (uintptr_t)pmap, sva, eva, 0);
1164b1425120Schristos 
11655528d7fdSmatt 	if (is_kernel_pmap_p) {
1166b1425120Schristos 		PMAP_COUNT(remove_kernel_calls);
11675528d7fdSmatt 	} else {
1168b1425120Schristos 		PMAP_COUNT(remove_user_calls);
1169b1425120Schristos 	}
11705528d7fdSmatt #ifdef PMAP_FAULTINFO
11715528d7fdSmatt 	curpcb->pcb_faultinfo.pfi_faultaddr = 0;
11725528d7fdSmatt 	curpcb->pcb_faultinfo.pfi_repeats = 0;
1173ab6f34e3Sskrll 	curpcb->pcb_faultinfo.pfi_faultptep = NULL;
1174b1425120Schristos #endif
1175b1425120Schristos 	kpreempt_disable();
11765528d7fdSmatt 	pmap_addr_range_check(pmap, sva, eva, __func__);
1177b1425120Schristos 	pmap_pte_process(pmap, sva, eva, pmap_pte_remove, npte);
1178b1425120Schristos 	kpreempt_enable();
1179b1425120Schristos 
11805528d7fdSmatt 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
1181b1425120Schristos }
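
/*
 * Illustrative caller pattern (a sketch, not code from this file):
 * per the header comment above, callers pass page-rounded bounds, and
 * the standard pmap(9) contract requires a pmap_update() before
 * relying on the removal:
 *
 *	const vaddr_t sva = trunc_page(va);
 *	pmap_remove(pmap, sva, sva + PAGE_SIZE);
 *	pmap_update(pmap);
 */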
1182b1425120Schristos 
1183b1425120Schristos /*
1184b1425120Schristos  *	pmap_page_protect:
1185b1425120Schristos  *
1186b1425120Schristos  *	Lower the permission for all mappings to a given page.
1187b1425120Schristos  */
1188b1425120Schristos void
1189b1425120Schristos pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
1190b1425120Schristos {
1191b1425120Schristos 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
1192b1425120Schristos 	pv_entry_t pv;
1193b1425120Schristos 	vaddr_t va;
1194b1425120Schristos 
1195e4535b97Sskrll 	UVMHIST_FUNC(__func__);
1196e4535b97Sskrll 	UVMHIST_CALLARGS(pmaphist, "(pg=%#jx (pa %#jx) prot=%#jx)",
1197cb32a134Spgoyette 	    (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), prot, 0);
1198b1425120Schristos 	PMAP_COUNT(page_protect);
1199b1425120Schristos 
1200b1425120Schristos 	switch (prot) {
1201b1425120Schristos 	case VM_PROT_READ | VM_PROT_WRITE:
1202b1425120Schristos 	case VM_PROT_ALL:
1203b1425120Schristos 		break;
1204b1425120Schristos 
1205b1425120Schristos 	/* copy_on_write */
1206b1425120Schristos 	case VM_PROT_READ:
1207b1425120Schristos 	case VM_PROT_READ | VM_PROT_EXECUTE:
1208b1425120Schristos 		pv = &mdpg->mdpg_first;
12095528d7fdSmatt 		kpreempt_disable();
12105528d7fdSmatt 		VM_PAGEMD_PVLIST_READLOCK(mdpg);
12115528d7fdSmatt 		pmap_pvlist_check(mdpg);
1212b1425120Schristos 		/*
12135a99893dSskrll 		 * Loop over all current mappings, setting/clearing as
12145a99893dSskrll 		 * appropriate.
1215b1425120Schristos 		 */
1216b1425120Schristos 		if (pv->pv_pmap != NULL) {
1217b1425120Schristos 			while (pv != NULL) {
12185528d7fdSmatt #ifdef PMAP_VIRTUAL_CACHE_ALIASES
1219930577a5Sskrll 				if (PV_ISKENTER_P(pv)) {
12205528d7fdSmatt 					pv = pv->pv_next;
12215528d7fdSmatt 					continue;
12225528d7fdSmatt 				}
12235528d7fdSmatt #endif
1224b1425120Schristos 				const pmap_t pmap = pv->pv_pmap;
12255528d7fdSmatt 				va = trunc_page(pv->pv_va);
12265528d7fdSmatt 				const uintptr_t gen =
1227b1425120Schristos 				    VM_PAGEMD_PVLIST_UNLOCK(mdpg);
1228b1425120Schristos 				pmap_protect(pmap, va, va + PAGE_SIZE, prot);
1229b1425120Schristos 				KASSERT(pv->pv_pmap == pmap);
1230b1425120Schristos 				pmap_update(pmap);
12315528d7fdSmatt 				if (gen != VM_PAGEMD_PVLIST_READLOCK(mdpg)) {
1232b1425120Schristos 					pv = &mdpg->mdpg_first;
1233b1425120Schristos 				} else {
1234b1425120Schristos 					pv = pv->pv_next;
1235b1425120Schristos 				}
12365528d7fdSmatt 				pmap_pvlist_check(mdpg);
1237b1425120Schristos 			}
1238b1425120Schristos 		}
12395528d7fdSmatt 		pmap_pvlist_check(mdpg);
1240b1425120Schristos 		VM_PAGEMD_PVLIST_UNLOCK(mdpg);
12415528d7fdSmatt 		kpreempt_enable();
1242b1425120Schristos 		break;
1243b1425120Schristos 
1244b1425120Schristos 	/* remove_all */
1245b1425120Schristos 	default:
124631d27c36Sskrll 		pmap_page_remove(mdpg);
1247b1425120Schristos 	}
1248b1425120Schristos 
12495528d7fdSmatt 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
1250b1425120Schristos }
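
/*
 * Illustrative calls (a sketch, not from this file), matching the
 * switch above:
 *
 *	pmap_page_protect(pg, VM_PROT_READ);	- copy-on-write case:
 *						  write-protect all mappings
 *	pmap_page_protect(pg, VM_PROT_NONE);	- default case: remove
 *						  every mapping of the page
 */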
1251b1425120Schristos 
1252b1425120Schristos static bool
1253b1425120Schristos pmap_pte_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep,
1254b1425120Schristos 	uintptr_t flags)
1255b1425120Schristos {
1256b1425120Schristos 	const vm_prot_t prot = (flags & VM_PROT_ALL);
1257b1425120Schristos 
1258e4535b97Sskrll 	UVMHIST_FUNC(__func__);
12595256ea23Sskrll 	UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx kernel=%jd va=%#jx..%#jx)",
12608c42a6afSpgoyette 	    (uintptr_t)pmap, (pmap == pmap_kernel() ? 1 : 0), sva, eva);
1261cb32a134Spgoyette 	UVMHIST_LOG(pmaphist, "ptep=%#jx, flags(npte)=%#jx)",
1262cb32a134Spgoyette 	    (uintptr_t)ptep, flags, 0, 0);
1263b1425120Schristos 
1264b1425120Schristos 	KASSERT(kpreempt_disabled());
1265b1425120Schristos 	/*
1266b1425120Schristos 	 * Change protection on every valid mapping within this segment.
1267b1425120Schristos 	 */
1268b1425120Schristos 	for (; sva < eva; sva += NBPG, ptep++) {
12695528d7fdSmatt 		pt_entry_t pte = *ptep;
12705528d7fdSmatt 		if (!pte_valid_p(pte))
1271b1425120Schristos 			continue;
12725528d7fdSmatt 		struct vm_page * const pg = PHYS_TO_VM_PAGE(pte_to_paddr(pte));
12735528d7fdSmatt 		if (pg != NULL && pte_modified_p(pte)) {
1274b1425120Schristos 			struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
1275b1425120Schristos 			if (VM_PAGEMD_EXECPAGE_P(mdpg)) {
1276af0cb0a3Sskrll 				KASSERT(!VM_PAGEMD_PVLIST_EMPTY_P(mdpg));
12775528d7fdSmatt #ifdef PMAP_VIRTUAL_CACHE_ALIASES
12785528d7fdSmatt 				if (VM_PAGEMD_CACHED_P(mdpg)) {
12795528d7fdSmatt #endif
1280b1425120Schristos 					UVMHIST_LOG(pmapexechist,
1281cb32a134Spgoyette 					    "pg %#jx (pa %#jx): "
128229bcf019Smrg 					    "syncicached performed",
1283cb32a134Spgoyette 					    (uintptr_t)pg, VM_PAGE_TO_PHYS(pg),
1284cb32a134Spgoyette 					    0, 0);
1285b1425120Schristos 					pmap_page_syncicache(pg);
1286b1425120Schristos 					PMAP_COUNT(exec_synced_protect);
12875528d7fdSmatt #ifdef PMAP_VIRTUAL_CACHE_ALIASES
12885528d7fdSmatt 				}
12895528d7fdSmatt #endif
1290b1425120Schristos 			}
1291b1425120Schristos 		}
12925528d7fdSmatt 		pte = pte_prot_downgrade(pte, prot);
12935528d7fdSmatt 		if (*ptep != pte) {
129429807ee5Sthorpej 			pmap_tlb_miss_lock_enter();
1295e90fc54cSskrll 			pte_set(ptep, pte);
1296b1425120Schristos 			/*
1297b1425120Schristos 			 * Update the TLB if needed.
1298b1425120Schristos 			 */
12995528d7fdSmatt 			pmap_tlb_update_addr(pmap, sva, pte, PMAP_TLB_NEED_IPI);
130029807ee5Sthorpej 			pmap_tlb_miss_lock_exit();
1301b1425120Schristos 		}
1302b1425120Schristos 	}
13035528d7fdSmatt 
13045528d7fdSmatt 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
13055528d7fdSmatt 
1306b1425120Schristos 	return false;
1307b1425120Schristos }
1308b1425120Schristos 
1309b1425120Schristos /*
1310b1425120Schristos  *	Set the physical protection on the
1311b1425120Schristos  *	specified range of this map as requested.
1312b1425120Schristos  */
1313b1425120Schristos void
1314b1425120Schristos pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
1315b1425120Schristos {
1316e4535b97Sskrll 	UVMHIST_FUNC(__func__);
1317e4535b97Sskrll 	UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx, va=%#jx..%#jx, prot=%ju)",
1318cb32a134Spgoyette 	    (uintptr_t)pmap, sva, eva, prot);
1319b1425120Schristos 	PMAP_COUNT(protect);
1320b1425120Schristos 
1321b1425120Schristos 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1322b1425120Schristos 		pmap_remove(pmap, sva, eva);
13235528d7fdSmatt 		UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
1324b1425120Schristos 		return;
1325b1425120Schristos 	}
1326b1425120Schristos 
1327b1425120Schristos 	/*
1328b1425120Schristos 	 * Change protection on every valid mapping within this segment.
1329b1425120Schristos 	 */
1330b1425120Schristos 	kpreempt_disable();
13315528d7fdSmatt 	pmap_addr_range_check(pmap, sva, eva, __func__);
1332b1425120Schristos 	pmap_pte_process(pmap, sva, eva, pmap_pte_protect, prot);
1333b1425120Schristos 	kpreempt_enable();
1334b1425120Schristos 
13355528d7fdSmatt 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
1336b1425120Schristos }
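
/*
 * Illustrative call (a sketch, not from this file): because the code
 * above degrades a no-read protection to pmap_remove(), a caller that
 * wants a read/execute-only text range does:
 *
 *	pmap_protect(pmap, sva, eva, VM_PROT_READ | VM_PROT_EXECUTE);
 *	pmap_update(pmap);
 */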
1337b1425120Schristos 
13385528d7fdSmatt #if defined(PMAP_VIRTUAL_CACHE_ALIASES) && !defined(PMAP_NO_PV_UNCACHED)
1339b1425120Schristos /*
1340b1425120Schristos  *	pmap_page_cache:
1341b1425120Schristos  *
1342b1425120Schristos  *	Change all mappings of a managed page to cached/uncached.
1343b1425120Schristos  */
13445528d7fdSmatt void
134531d27c36Sskrll pmap_page_cache(struct vm_page_md *mdpg, bool cached)
1346b1425120Schristos {
134731d27c36Sskrll #ifdef UVMHIST
134831d27c36Sskrll 	const bool vmpage_p = VM_PAGEMD_VMPAGE_P(mdpg);
134931d27c36Sskrll 	struct vm_page * const pg = vmpage_p ? VM_MD_TO_PAGE(mdpg) : NULL;
135031d27c36Sskrll #endif
13515528d7fdSmatt 
1352e4535b97Sskrll 	UVMHIST_FUNC(__func__);
135331d27c36Sskrll 	UVMHIST_CALLARGS(pmaphist, "(mdpg=%#jx (pa %#jx) cached=%jd vmpage %jd)",
135431d27c36Sskrll 	    (uintptr_t)mdpg, pg ? VM_PAGE_TO_PHYS(pg) : 0, cached, vmpage_p);
13555528d7fdSmatt 
1356b1425120Schristos 	KASSERT(kpreempt_disabled());
13575528d7fdSmatt 	KASSERT(VM_PAGEMD_PVLIST_LOCKED_P(mdpg));
1358b1425120Schristos 
1359b1425120Schristos 	if (cached) {
1360b1425120Schristos 		pmap_page_clear_attributes(mdpg, VM_PAGEMD_UNCACHED);
1361b1425120Schristos 		PMAP_COUNT(page_cache_restorations);
1362b1425120Schristos 	} else {
1363b1425120Schristos 		pmap_page_set_attributes(mdpg, VM_PAGEMD_UNCACHED);
1364b1425120Schristos 		PMAP_COUNT(page_cache_evictions);
1365b1425120Schristos 	}
1366b1425120Schristos 
13675528d7fdSmatt 	for (pv_entry_t pv = &mdpg->mdpg_first; pv != NULL; pv = pv->pv_next) {
1368b1425120Schristos 		pmap_t pmap = pv->pv_pmap;
13695528d7fdSmatt 		vaddr_t va = trunc_page(pv->pv_va);
1370b1425120Schristos 
1371b1425120Schristos 		KASSERT(pmap != NULL);
1372b1425120Schristos 		KASSERT(pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(va));
1373b1425120Schristos 		pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
1374b1425120Schristos 		if (ptep == NULL)
1375b1425120Schristos 			continue;
13765528d7fdSmatt 		pt_entry_t pte = *ptep;
13775528d7fdSmatt 		if (pte_valid_p(pte)) {
13785528d7fdSmatt 			pte = pte_cached_change(pte, cached);
137929807ee5Sthorpej 			pmap_tlb_miss_lock_enter();
1380e90fc54cSskrll 			pte_set(ptep, pte);
13815528d7fdSmatt 			pmap_tlb_update_addr(pmap, va, pte, PMAP_TLB_NEED_IPI);
138229807ee5Sthorpej 			pmap_tlb_miss_lock_exit();
1383b1425120Schristos 		}
1384b1425120Schristos 	}
13855528d7fdSmatt 
13865528d7fdSmatt 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
1387b1425120Schristos }
13885528d7fdSmatt #endif	/* PMAP_VIRTUAL_CACHE_ALIASES && !PMAP_NO_PV_UNCACHED */
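
/*
 * Illustrative use (an assumption inferred from the KASSERTs above):
 * the VCA code calls this with preemption disabled and the pvlist lock
 * held, evicting on an alias conflict and restoring once resolved:
 *
 *	VM_PAGEMD_PVLIST_LOCK(mdpg);
 *	pmap_page_cache(mdpg, false);	- conflict: force uncached
 *	...
 *	pmap_page_cache(mdpg, true);	- aliases gone: cache again
 *	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
 */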
1389b1425120Schristos 
1390b1425120Schristos /*
1391b1425120Schristos  *	Insert the given physical page (p) at
1392b1425120Schristos  *	the specified virtual address (v) in the
1393b1425120Schristos  *	target physical map with the protection requested.
1394b1425120Schristos  *
1395b1425120Schristos  *	If specified, the page will be wired down, meaning
1396b1425120Schristos  *	that the related pte cannot be reclaimed.
1397b1425120Schristos  *
1398b1425120Schristos  *	NB:  This is the only routine which MAY NOT lazy-evaluate
1399b1425120Schristos  *	or lose information.  That is, this routine must actually
1400b1425120Schristos  *	insert this page into the given map NOW.
1401b1425120Schristos  */
1402b1425120Schristos int
1403b1425120Schristos pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1404b1425120Schristos {
1405b1425120Schristos 	const bool wired = (flags & PMAP_WIRED) != 0;
1406b1425120Schristos 	const bool is_kernel_pmap_p = (pmap == pmap_kernel());
14078d974145Sskrll #if defined(EFI_RUNTIME)
14088d974145Sskrll 	const bool is_efirt_pmap_p = (pmap == pmap_efirt());
14098d974145Sskrll #else
14108d974145Sskrll 	const bool is_efirt_pmap_p = false;
14118d974145Sskrll #endif
14125528d7fdSmatt 	u_int update_flags = (flags & VM_PROT_ALL) != 0 ? PMAP_TLB_INSERT : 0;
1413b1425120Schristos #ifdef UVMHIST
1414b1425120Schristos 	struct kern_history * const histp =
1415b1425120Schristos 	    ((prot & VM_PROT_EXECUTE) ? &pmapexechist : &pmaphist);
1416b1425120Schristos #endif
1417b1425120Schristos 
1418e4535b97Sskrll 	UVMHIST_FUNC(__func__);
1419e4535b97Sskrll 	UVMHIST_CALLARGS(*histp, "(pmap=%#jx, va=%#jx, pa=%#jx",
1420cb32a134Spgoyette 	    (uintptr_t)pmap, va, pa, 0);
1421cb32a134Spgoyette 	UVMHIST_LOG(*histp, "prot=%#jx flags=%#jx)", prot, flags, 0, 0);
1422b1425120Schristos 
1423b1425120Schristos 	const bool good_color = PMAP_PAGE_COLOROK_P(pa, va);
1424b1425120Schristos 	if (is_kernel_pmap_p) {
1425b1425120Schristos 		PMAP_COUNT(kernel_mappings);
1426b1425120Schristos 		if (!good_color)
1427b1425120Schristos 			PMAP_COUNT(kernel_mappings_bad);
1428b1425120Schristos 	} else {
1429b1425120Schristos 		PMAP_COUNT(user_mappings);
1430b1425120Schristos 		if (!good_color)
1431b1425120Schristos 			PMAP_COUNT(user_mappings_bad);
1432b1425120Schristos 	}
14335528d7fdSmatt 	pmap_addr_range_check(pmap, va, va, __func__);
1434b1425120Schristos 
14355528d7fdSmatt 	KASSERTMSG(prot & VM_PROT_READ, "no READ (%#x) in prot %#x",
14365528d7fdSmatt 	    VM_PROT_READ, prot);
1437b1425120Schristos 
1438b1425120Schristos 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
14395528d7fdSmatt 	struct vm_page_md * const mdpg = (pg ? VM_PAGE_TO_MD(pg) : NULL);
1440b1425120Schristos 
144131d27c36Sskrll 	struct vm_page_md *mdpp = NULL;
144231d27c36Sskrll #ifdef __HAVE_PMAP_PV_TRACK
144331d27c36Sskrll 	struct pmap_page *pp = pmap_pv_tracked(pa);
144431d27c36Sskrll 	mdpp = pp ? PMAP_PAGE_TO_MD(pp) : NULL;
144531d27c36Sskrll #endif
144631d27c36Sskrll 
144731d27c36Sskrll 	if (mdpg) {
1448b1425120Schristos 		/* Set page referenced/modified status based on flags */
14495528d7fdSmatt 		if (flags & VM_PROT_WRITE) {
1450b1425120Schristos 			pmap_page_set_attributes(mdpg, VM_PAGEMD_MODIFIED | VM_PAGEMD_REFERENCED);
14515528d7fdSmatt 		} else if (flags & VM_PROT_ALL) {
1452b1425120Schristos 			pmap_page_set_attributes(mdpg, VM_PAGEMD_REFERENCED);
14535528d7fdSmatt 		}
1454b1425120Schristos 
14555528d7fdSmatt #ifdef PMAP_VIRTUAL_CACHE_ALIASES
14565528d7fdSmatt 		if (!VM_PAGEMD_CACHED_P(mdpg)) {
1457b1425120Schristos 			flags |= PMAP_NOCACHE;
14585528d7fdSmatt 			PMAP_COUNT(uncached_mappings);
14595528d7fdSmatt 		}
1460b1425120Schristos #endif
1461b1425120Schristos 
1462b1425120Schristos 		PMAP_COUNT(managed_mappings);
146331d27c36Sskrll 	} else if (mdpp) {
146431d27c36Sskrll #ifdef __HAVE_PMAP_PV_TRACK
146531d27c36Sskrll 		pmap_page_set_attributes(mdpg, VM_PAGEMD_REFERENCED);
146631d27c36Sskrll 
146731d27c36Sskrll 		PMAP_COUNT(pvtracked_mappings);
146831d27c36Sskrll #endif
14698d974145Sskrll 	} else if (is_efirt_pmap_p) {
14708d974145Sskrll 		PMAP_COUNT(efirt_mappings);
1471b1425120Schristos 	} else {
1472b1425120Schristos 		/*
1473b1425120Schristos 		 * Assumption: if it is not part of our managed memory
1474b1425120Schristos 		 * then it must be device memory which may be volatile.
1475b1425120Schristos 		 */
14765528d7fdSmatt 		if ((flags & PMAP_CACHE_MASK) == 0)
1477b1425120Schristos 			flags |= PMAP_NOCACHE;
1478b1425120Schristos 		PMAP_COUNT(unmanaged_mappings);
1479b1425120Schristos 	}
1480b1425120Schristos 
14818d974145Sskrll 	KASSERTMSG(mdpg == NULL || mdpp == NULL || is_efirt_pmap_p,
14828d974145Sskrll 	    "mdpg %p mdpp %p efirt %s", mdpg, mdpp,
14838d974145Sskrll 	    is_efirt_pmap_p ? "true" : "false");
148431d27c36Sskrll 
148531d27c36Sskrll 	struct vm_page_md *md = (mdpg != NULL) ? mdpg : mdpp;
14868d974145Sskrll 	pt_entry_t npte = is_efirt_pmap_p ?
14878d974145Sskrll 	    pte_make_enter_efirt(pa, prot, flags) :
14888d974145Sskrll 	    pte_make_enter(pa, md, prot, flags, is_kernel_pmap_p);
1489b1425120Schristos 
1490b1425120Schristos 	kpreempt_disable();
14915528d7fdSmatt 
1492b1425120Schristos 	pt_entry_t * const ptep = pmap_pte_reserve(pmap, va, flags);
1493b1425120Schristos 	if (__predict_false(ptep == NULL)) {
1494b1425120Schristos 		kpreempt_enable();
14955528d7fdSmatt 		UVMHIST_LOG(*histp, " <-- ENOMEM", 0, 0, 0, 0);
1496b1425120Schristos 		return ENOMEM;
1497b1425120Schristos 	}
14985528d7fdSmatt 	const pt_entry_t opte = *ptep;
1499061de4b2Sskrll 	const bool resident = pte_valid_p(opte);
1500061de4b2Sskrll 	bool remap = false;
1501061de4b2Sskrll 	if (resident) {
1502061de4b2Sskrll 		if (pte_to_paddr(opte) != pa) {
1503061de4b2Sskrll 			KASSERT(!is_kernel_pmap_p);
1504061de4b2Sskrll 			const pt_entry_t rpte = pte_nv_entry(false);
1505061de4b2Sskrll 
1506061de4b2Sskrll 			pmap_addr_range_check(pmap, va, va + NBPG, __func__);
1507061de4b2Sskrll 			pmap_pte_process(pmap, va, va + NBPG, pmap_pte_remove,
1508061de4b2Sskrll 			    rpte);
1509061de4b2Sskrll 			PMAP_COUNT(user_mappings_changed);
1510061de4b2Sskrll 			remap = true;
1511061de4b2Sskrll 		}
1512061de4b2Sskrll 		update_flags |= PMAP_TLB_NEED_IPI;
1513061de4b2Sskrll 	}
1514061de4b2Sskrll 
1515061de4b2Sskrll 	if (!resident || remap) {
1516061de4b2Sskrll 		pmap->pm_stats.resident_count++;
1517061de4b2Sskrll 	}
1518b1425120Schristos 
1519b1425120Schristos 	/* Done after the cases above that may sleep or return early. */
152031d27c36Sskrll 	if (md)
152131d27c36Sskrll 		pmap_enter_pv(pmap, va, pa, md, &npte, 0);
1522b1425120Schristos 
1523b1425120Schristos 	/*
1524b1425120Schristos 	 * Now validate mapping with desired protection/wiring.
1525b1425120Schristos 	 */
1526b1425120Schristos 	if (wired) {
1527b1425120Schristos 		pmap->pm_stats.wired_count++;
1528b1425120Schristos 		npte = pte_wire_entry(npte);
1529b1425120Schristos 	}
1530b1425120Schristos 
1531cb32a134Spgoyette 	UVMHIST_LOG(*histp, "new pte %#jx (pa %#jx)",
15325528d7fdSmatt 	    pte_value(npte), pa, 0, 0);
1533b1425120Schristos 
1534b1425120Schristos 	KASSERT(pte_valid_p(npte));
15355528d7fdSmatt 
153629807ee5Sthorpej 	pmap_tlb_miss_lock_enter();
1537e90fc54cSskrll 	pte_set(ptep, npte);
15385528d7fdSmatt 	pmap_tlb_update_addr(pmap, va, npte, update_flags);
153929807ee5Sthorpej 	pmap_tlb_miss_lock_exit();
1540b1425120Schristos 	kpreempt_enable();
1541b1425120Schristos 
1542b1425120Schristos 	if (pg != NULL && (prot == (VM_PROT_READ | VM_PROT_EXECUTE))) {
1543b1425120Schristos 		KASSERT(mdpg != NULL);
1544b1425120Schristos 		PMAP_COUNT(exec_mappings);
1545b1425120Schristos 		if (!VM_PAGEMD_EXECPAGE_P(mdpg) && pte_cached_p(npte)) {
1546b1425120Schristos 			if (!pte_deferred_exec_p(npte)) {
1547cb32a134Spgoyette 				UVMHIST_LOG(*histp, "va=%#jx pg %#jx: "
1548cb32a134Spgoyette 				    "immediate syncicache",
1549cb32a134Spgoyette 				    va, (uintptr_t)pg, 0, 0);
1550b1425120Schristos 				pmap_page_syncicache(pg);
1551b1425120Schristos 				pmap_page_set_attributes(mdpg,
1552b1425120Schristos 				    VM_PAGEMD_EXECPAGE);
1553b1425120Schristos 				PMAP_COUNT(exec_synced_mappings);
1554b1425120Schristos 			} else {
1555cb32a134Spgoyette 				UVMHIST_LOG(*histp, "va=%#jx pg %#jx: defer "
1556cb32a134Spgoyette 				    "syncicache: pte %#jx",
1557cb32a134Spgoyette 				    va, (uintptr_t)pg, npte, 0);
1558b1425120Schristos 			}
1559b1425120Schristos 		} else {
1560b1425120Schristos 			UVMHIST_LOG(*histp,
1561cb32a134Spgoyette 			    "va=%#jx pg %#jx: no syncicache cached %jd",
1562cb32a134Spgoyette 			    va, (uintptr_t)pg, pte_cached_p(npte), 0);
1563b1425120Schristos 		}
1564b1425120Schristos 	} else if (pg != NULL && (prot & VM_PROT_EXECUTE)) {
1565b1425120Schristos 		KASSERT(mdpg != NULL);
1566b1425120Schristos 		KASSERT(prot & VM_PROT_WRITE);
1567b1425120Schristos 		PMAP_COUNT(exec_mappings);
1568b1425120Schristos 		pmap_page_syncicache(pg);
1569b1425120Schristos 		pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
15705528d7fdSmatt 		UVMHIST_LOG(*histp,
1571cb32a134Spgoyette 		    "va=%#jx pg %#jx: immediate syncicache (writeable)",
1572cb32a134Spgoyette 		    va, (uintptr_t)pg, 0, 0);
1573b1425120Schristos 	}
1574b1425120Schristos 
15755528d7fdSmatt 	UVMHIST_LOG(*histp, " <-- 0 (OK)", 0, 0, 0, 0);
1576b1425120Schristos 	return 0;
1577b1425120Schristos }
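
/*
 * Illustrative caller pattern (a sketch, not from this file): with the
 * standard PMAP_CANFAIL flag set, a caller must be prepared for the
 * ENOMEM return taken above when pmap_pte_reserve() fails:
 *
 *	int error = pmap_enter(pmap, va, pa, prot, flags | PMAP_CANFAIL);
 *	if (error != 0)
 *		... back off, let the pagedaemon run, and retry ...
 */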
1578b1425120Schristos 
1579b1425120Schristos void
1580b1425120Schristos pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1581b1425120Schristos {
15825528d7fdSmatt 	pmap_t pmap = pmap_kernel();
1583b1425120Schristos 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
15845528d7fdSmatt 	struct vm_page_md * const mdpg = (pg ? VM_PAGE_TO_MD(pg) : NULL);
1585b1425120Schristos 
1586e4535b97Sskrll 	UVMHIST_FUNC(__func__);
1587e4535b97Sskrll 	UVMHIST_CALLARGS(pmaphist, "(va=%#jx pa=%#jx prot=%ju, flags=%#jx)",
15885528d7fdSmatt 	    va, pa, prot, flags);
1589b1425120Schristos 	PMAP_COUNT(kenter_pa);
1590b1425120Schristos 
15915528d7fdSmatt 	if (mdpg == NULL) {
1592b1425120Schristos 		PMAP_COUNT(kenter_pa_unmanaged);
15935528d7fdSmatt 		if ((flags & PMAP_CACHE_MASK) == 0)
1594b1425120Schristos 			flags |= PMAP_NOCACHE;
1595b1425120Schristos 	} else {
1596b1425120Schristos 		if ((flags & PMAP_NOCACHE) == 0 && !PMAP_PAGE_COLOROK_P(pa, va))
1597b1425120Schristos 			PMAP_COUNT(kenter_pa_bad);
15985528d7fdSmatt 	}
1599b1425120Schristos 
16005528d7fdSmatt 	pt_entry_t npte = pte_make_kenter_pa(pa, mdpg, prot, flags);
1601b1425120Schristos 	kpreempt_disable();
1602196ee94dSskrll 	pt_entry_t * const ptep = pmap_pte_reserve(pmap, va, 0);
1603196ee94dSskrll 
16045528d7fdSmatt 	KASSERTMSG(ptep != NULL, "%#"PRIxVADDR " %#"PRIxVADDR, va,
16055528d7fdSmatt 	    pmap_limits.virtual_end);
1606b1425120Schristos 	KASSERT(!pte_valid_p(*ptep));
16075528d7fdSmatt 
16085528d7fdSmatt 	/*
16105528d7fdSmatt 	 * No need to track non-managed pages or PMAP_KMPAGE pages for aliases.
16105528d7fdSmatt 	 */
16115528d7fdSmatt #ifdef PMAP_VIRTUAL_CACHE_ALIASES
1612a35e54e6Smatt 	if (pg != NULL && (flags & PMAP_KMPAGE) == 0
1613a35e54e6Smatt 	    && pmap_md_virtual_cache_aliasing_p()) {
161431d27c36Sskrll 		pmap_enter_pv(pmap, va, pa, mdpg, &npte, PV_KENTER);
16155528d7fdSmatt 	}
16165528d7fdSmatt #endif
16175528d7fdSmatt 
1618b1425120Schristos 	/*
1619b1425120Schristos 	 * We have the option to force this mapping into the TLB but we
1620b1425120Schristos 	 * don't.  Instead let the next reference to the page do it.
1621b1425120Schristos 	 */
162229807ee5Sthorpej 	pmap_tlb_miss_lock_enter();
1623e90fc54cSskrll 	pte_set(ptep, npte);
1624b1425120Schristos 	pmap_tlb_update_addr(pmap_kernel(), va, npte, 0);
162529807ee5Sthorpej 	pmap_tlb_miss_lock_exit();
1626b1425120Schristos 	kpreempt_enable();
1627b1425120Schristos #if DEBUG > 1
1628b1425120Schristos 	for (u_int i = 0; i < PAGE_SIZE / sizeof(long); i++) {
1629b1425120Schristos 		if (((long *)va)[i] != ((long *)pa)[i])
1630b1425120Schristos 			panic("%s: contents (%lx) of va %#"PRIxVADDR
1631b1425120Schristos 			    " != contents (%lx) of pa %#"PRIxPADDR, __func__,
1632b1425120Schristos 			    ((long *)va)[i], va, ((long *)pa)[i], pa);
1633b1425120Schristos 	}
1634b1425120Schristos #endif
16355528d7fdSmatt 
1636cb32a134Spgoyette 	UVMHIST_LOG(pmaphist, " <-- done (ptep=%#jx)", (uintptr_t)ptep, 0, 0,
1637cb32a134Spgoyette 	    0);
1638b1425120Schristos }
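
/*
 * Illustrative pairing (a sketch, not from this file): per pmap(9),
 * kenter'd mappings are unmanaged and are undone with pmap_kremove(),
 * never pmap_remove():
 *
 *	pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
 *	...
 *	pmap_kremove(va, PAGE_SIZE);
 */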
1639b1425120Schristos 
16405528d7fdSmatt /*
16415528d7fdSmatt  *	Remove the given range of addresses from the kernel map.
16425528d7fdSmatt  *
16435528d7fdSmatt  *	It is assumed that the start and end are properly
16445528d7fdSmatt  *	rounded to the page size.
16455528d7fdSmatt  */
16465528d7fdSmatt 
1647b1425120Schristos static bool
1648b1425120Schristos pmap_pte_kremove(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep,
1649b1425120Schristos 	uintptr_t flags)
1650b1425120Schristos {
16515528d7fdSmatt 	const pt_entry_t new_pte = pte_nv_entry(true);
16525528d7fdSmatt 
1653e4535b97Sskrll 	UVMHIST_FUNC(__func__);
1654e4535b97Sskrll 	UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx, sva=%#jx eva=%#jx ptep=%#jx)",
1655cb32a134Spgoyette 	    (uintptr_t)pmap, sva, eva, (uintptr_t)ptep);
1656b1425120Schristos 
1657b1425120Schristos 	KASSERT(kpreempt_disabled());
1658b1425120Schristos 
1659b1425120Schristos 	for (; sva < eva; sva += NBPG, ptep++) {
16605528d7fdSmatt 		pt_entry_t pte = *ptep;
16615528d7fdSmatt 		if (!pte_valid_p(pte))
1662b1425120Schristos 			continue;
1663b1425120Schristos 
1664b1425120Schristos 		PMAP_COUNT(kremove_pages);
1665a35e54e6Smatt #ifdef PMAP_VIRTUAL_CACHE_ALIASES
1666bd0724a2Smrg 		struct vm_page * const pg = PHYS_TO_VM_PAGE(pte_to_paddr(pte));
1667a35e54e6Smatt 		if (pg != NULL && pmap_md_virtual_cache_aliasing_p()) {
16685528d7fdSmatt 			pmap_remove_pv(pmap, sva, pg, !pte_readonly_p(pte));
16695528d7fdSmatt 		}
1670a35e54e6Smatt #endif
1671b1425120Schristos 
167229807ee5Sthorpej 		pmap_tlb_miss_lock_enter();
1673e90fc54cSskrll 		pte_set(ptep, new_pte);
16745528d7fdSmatt 		pmap_tlb_invalidate_addr(pmap, sva);
167529807ee5Sthorpej 		pmap_tlb_miss_lock_exit();
1676b1425120Schristos 	}
1677b1425120Schristos 
16785528d7fdSmatt 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
16795528d7fdSmatt 
1680b1425120Schristos 	return false;
1681b1425120Schristos }
1682b1425120Schristos 
1683b1425120Schristos void
1684b1425120Schristos pmap_kremove(vaddr_t va, vsize_t len)
1685b1425120Schristos {
1686b1425120Schristos 	const vaddr_t sva = trunc_page(va);
1687b1425120Schristos 	const vaddr_t eva = round_page(va + len);
1688b1425120Schristos 
1689e4535b97Sskrll 	UVMHIST_FUNC(__func__);
1690e4535b97Sskrll 	UVMHIST_CALLARGS(pmaphist, "(va=%#jx len=%#jx)", va, len, 0, 0);
1691b1425120Schristos 
1692b1425120Schristos 	kpreempt_disable();
1693b1425120Schristos 	pmap_pte_process(pmap_kernel(), sva, eva, pmap_pte_kremove, 0);
1694b1425120Schristos 	kpreempt_enable();
1695b1425120Schristos 
16965528d7fdSmatt 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
1697b1425120Schristos }
1698b1425120Schristos 
1699be9c6147Sad bool
1700b1425120Schristos pmap_remove_all(struct pmap *pmap)
1701b1425120Schristos {
1702e4535b97Sskrll 	UVMHIST_FUNC(__func__);
1703e4535b97Sskrll 	UVMHIST_CALLARGS(pmaphist, "(pm=%#jx)", (uintptr_t)pmap, 0, 0, 0);
17045528d7fdSmatt 
1705b1425120Schristos 	KASSERT(pmap != pmap_kernel());
1706b1425120Schristos 
1707b1425120Schristos 	kpreempt_disable();
1708b1425120Schristos 	/*
1709b1425120Schristos 	 * Free all of our ASIDs, which means we can skip doing all the
1710b1425120Schristos 	 * tlb_invalidate_addrs().
1711b1425120Schristos 	 */
171229807ee5Sthorpej 	pmap_tlb_miss_lock_enter();
17135528d7fdSmatt #ifdef MULTIPROCESSOR
17145528d7fdSmatt 	// This should be the last CPU with this pmap onproc
17155528d7fdSmatt 	KASSERT(!kcpuset_isotherset(pmap->pm_onproc, cpu_index(curcpu())));
17165528d7fdSmatt 	if (kcpuset_isset(pmap->pm_onproc, cpu_index(curcpu())))
17175528d7fdSmatt #endif
1718b1425120Schristos 		pmap_tlb_asid_deactivate(pmap);
17195528d7fdSmatt #ifdef MULTIPROCESSOR
17205528d7fdSmatt 	KASSERT(kcpuset_iszero(pmap->pm_onproc));
17215528d7fdSmatt #endif
1722b1425120Schristos 	pmap_tlb_asid_release_all(pmap);
172329807ee5Sthorpej 	pmap_tlb_miss_lock_exit();
1724b1425120Schristos 	pmap->pm_flags |= PMAP_DEFERRED_ACTIVATE;
1725b1425120Schristos 
17265528d7fdSmatt #ifdef PMAP_FAULTINFO
17275528d7fdSmatt 	curpcb->pcb_faultinfo.pfi_faultaddr = 0;
17285528d7fdSmatt 	curpcb->pcb_faultinfo.pfi_repeats = 0;
1729ab6f34e3Sskrll 	curpcb->pcb_faultinfo.pfi_faultptep = NULL;
17305528d7fdSmatt #endif
1731b1425120Schristos 	kpreempt_enable();
17325528d7fdSmatt 
17335528d7fdSmatt 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
1734be9c6147Sad 	return false;
1735b1425120Schristos }
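
/*
 * Illustrative caller pattern (a sketch, not from this file): the
 * "return false" above tells the caller the mappings still have to be
 * removed, but with PMAP_DEFERRED_ACTIVATE now set, the per-page
 * pmap_tlb_invalidate_addr() calls in pmap_pte_remove() are skipped:
 *
 *	if (!pmap_remove_all(pmap))
 *		pmap_remove(pmap, sva, eva);	- fast: no TLB shootdowns
 */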
1736b1425120Schristos 
1737b1425120Schristos /*
1738b1425120Schristos  *	Routine:	pmap_unwire
1739b1425120Schristos  *	Function:	Clear the wired attribute for a map/virtual-address
1740b1425120Schristos  *			pair.
1741b1425120Schristos  *	In/out conditions:
1742b1425120Schristos  *			The mapping must already exist in the pmap.
1743b1425120Schristos  */
1744b1425120Schristos void
1745b1425120Schristos pmap_unwire(pmap_t pmap, vaddr_t va)
1746b1425120Schristos {
1747e4535b97Sskrll 	UVMHIST_FUNC(__func__);
1748e4535b97Sskrll 	UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx, va=%#jx)", (uintptr_t)pmap, va,
1749cb32a134Spgoyette 	    0, 0);
1750b1425120Schristos 	PMAP_COUNT(unwire);
1751b1425120Schristos 
1752b1425120Schristos 	/*
1753b1425120Schristos 	 * Don't need to flush the TLB since PG_WIRED is only in software.
1754b1425120Schristos 	 */
1755b1425120Schristos 	kpreempt_disable();
17565528d7fdSmatt 	pmap_addr_range_check(pmap, va, va, __func__);
1757b1425120Schristos 	pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
17585528d7fdSmatt 	KASSERTMSG(ptep != NULL, "pmap %p va %#"PRIxVADDR" invalid STE",
1759b1425120Schristos 	    pmap, va);
17605528d7fdSmatt 	pt_entry_t pte = *ptep;
17615528d7fdSmatt 	KASSERTMSG(pte_valid_p(pte),
17625528d7fdSmatt 	    "pmap %p va %#" PRIxVADDR " invalid PTE %#" PRIxPTE " @ %p",
17635528d7fdSmatt 	    pmap, va, pte_value(pte), ptep);
1764b1425120Schristos 
17655528d7fdSmatt 	if (pte_wired_p(pte)) {
176629807ee5Sthorpej 		pmap_tlb_miss_lock_enter();
1767e90fc54cSskrll 		pte_set(ptep, pte_unwire_entry(pte));
176829807ee5Sthorpej 		pmap_tlb_miss_lock_exit();
1769b1425120Schristos 		pmap->pm_stats.wired_count--;
1770b1425120Schristos 	}
1771b1425120Schristos #ifdef DIAGNOSTIC
1772b1425120Schristos 	else {
1773b1425120Schristos 		printf("%s: wiring for pmap %p va %#"PRIxVADDR" unchanged!\n",
1774b1425120Schristos 		    __func__, pmap, va);
1775b1425120Schristos 	}
1776b1425120Schristos #endif
1777b1425120Schristos 	kpreempt_enable();
17785528d7fdSmatt 
17795528d7fdSmatt 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
1780b1425120Schristos }
1781b1425120Schristos 
1782b1425120Schristos /*
1783b1425120Schristos  *	Routine:	pmap_extract
1784b1425120Schristos  *	Function:
1785b1425120Schristos  *		Extract the physical page address associated
1786b1425120Schristos  *		with the given map/virtual_address pair.
1787b1425120Schristos  */
1788b1425120Schristos bool
1789b1425120Schristos pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
1790b1425120Schristos {
1791b1425120Schristos 	paddr_t pa;
1792b1425120Schristos 
1793b1425120Schristos 	if (pmap == pmap_kernel()) {
1794b1425120Schristos 		if (pmap_md_direct_mapped_vaddr_p(va)) {
1795b1425120Schristos 			pa = pmap_md_direct_mapped_vaddr_to_paddr(va);
1796b1425120Schristos 			goto done;
1797b1425120Schristos 		}
1798b1425120Schristos 		if (pmap_md_io_vaddr_p(va))
1799b1425120Schristos 			panic("pmap_extract: io address %#"PRIxVADDR"", va);
18005528d7fdSmatt 
18015528d7fdSmatt 		if (va >= pmap_limits.virtual_end)
18025528d7fdSmatt 			panic("%s: illegal kernel mapped address %#"PRIxVADDR,
18035528d7fdSmatt 			    __func__, va);
1804b1425120Schristos 	}
1805b1425120Schristos 	kpreempt_disable();
18065528d7fdSmatt 	const pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
18075528d7fdSmatt 	if (ptep == NULL || !pte_valid_p(*ptep)) {
1808b1425120Schristos 		kpreempt_enable();
1809b1425120Schristos 		return false;
1810b1425120Schristos 	}
1811b1425120Schristos 	pa = pte_to_paddr(*ptep) | (va & PGOFSET);
1812b1425120Schristos 	kpreempt_enable();
1813b1425120Schristos done:
1814b1425120Schristos 	if (pap != NULL) {
1815b1425120Schristos 		*pap = pa;
1816b1425120Schristos 	}
1817b1425120Schristos 	return true;
1818b1425120Schristos }
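
/*
 * Typical use (a sketch, not from this file):
 *
 *	paddr_t pa;
 *	if (!pmap_extract(pmap, va, &pa))
 *		return EFAULT;		- no valid mapping at va
 *	- pa includes the offset within the page (va & PGOFSET)
 */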
1819b1425120Schristos 
1820b1425120Schristos /*
1821b1425120Schristos  *	Copy the range specified by src_addr/len
1822b1425120Schristos  *	from the source map to the range dst_addr/len
1823b1425120Schristos  *	in the destination map.
1824b1425120Schristos  *
1825b1425120Schristos  *	This routine is only advisory and need not do anything.
1826b1425120Schristos  */
1827b1425120Schristos void
1828b1425120Schristos pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr, vsize_t len,
1829b1425120Schristos     vaddr_t src_addr)
1830b1425120Schristos {
1831e4535b97Sskrll 	UVMHIST_FUNC(__func__);
183297e917b2Sskrll 	UVMHIST_CALLARGS(pmaphist, "(dpm=#%jx spm=%#jx dva=%#jx sva=%#jx",
1833*fbd0dae4Sskrll 	    (uintptr_t)dst_pmap, (uintptr_t)src_pmap, dst_addr, src_addr);
183497e917b2Sskrll 	UVMHIST_LOG(pmaphist, "... len=%#jx)", len, 0, 0, 0);
1835b1425120Schristos 	PMAP_COUNT(copy);
1836b1425120Schristos }
1837b1425120Schristos 
1838b1425120Schristos /*
1839b1425120Schristos  *	pmap_clear_reference:
1840b1425120Schristos  *
1841b1425120Schristos  *	Clear the reference bit on the specified physical page.
1842b1425120Schristos  */
1843b1425120Schristos bool
1844b1425120Schristos pmap_clear_reference(struct vm_page *pg)
1845b1425120Schristos {
1846b1425120Schristos 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
1847b1425120Schristos 
1848e4535b97Sskrll 	UVMHIST_FUNC(__func__);
1849e4535b97Sskrll 	UVMHIST_CALLARGS(pmaphist, "(pg=%#jx (pa %#jx))",
1850cb32a134Spgoyette 	   (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), 0,0);
1851b1425120Schristos 
1852b1425120Schristos 	bool rv = pmap_page_clear_attributes(mdpg, VM_PAGEMD_REFERENCED);
1853b1425120Schristos 
1854cb32a134Spgoyette 	UVMHIST_LOG(pmaphist, " <-- wasref %ju", rv, 0, 0, 0);
1855b1425120Schristos 
1856b1425120Schristos 	return rv;
1857b1425120Schristos }
1858b1425120Schristos 
1859b1425120Schristos /*
1860b1425120Schristos  *	pmap_is_referenced:
1861b1425120Schristos  *
1862b1425120Schristos  *	Return whether or not the specified physical page is referenced
1863b1425120Schristos  *	by any physical maps.
1864b1425120Schristos  */
1865b1425120Schristos bool
1866b1425120Schristos pmap_is_referenced(struct vm_page *pg)
1867b1425120Schristos {
1868b1425120Schristos 	return VM_PAGEMD_REFERENCED_P(VM_PAGE_TO_MD(pg));
1869b1425120Schristos }
1870b1425120Schristos 
1871b1425120Schristos /*
1872b1425120Schristos  *	Clear the modify bits on the specified physical page.
1873b1425120Schristos  */
1874b1425120Schristos bool
1875b1425120Schristos pmap_clear_modify(struct vm_page *pg)
1876b1425120Schristos {
1877b1425120Schristos 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
1878b1425120Schristos 	pv_entry_t pv = &mdpg->mdpg_first;
1879b1425120Schristos 	pv_entry_t pv_next;
1880b1425120Schristos 
1881e4535b97Sskrll 	UVMHIST_FUNC(__func__);
1882e4535b97Sskrll 	UVMHIST_CALLARGS(pmaphist, "(pg=%#jx (%#jx))",
1883cb32a134Spgoyette 	    (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), 0,0);
1884b1425120Schristos 	PMAP_COUNT(clear_modify);
1885b1425120Schristos 
1886b1425120Schristos 	if (VM_PAGEMD_EXECPAGE_P(mdpg)) {
1887b1425120Schristos 		if (pv->pv_pmap == NULL) {
1888b1425120Schristos 			UVMHIST_LOG(pmapexechist,
1889cb32a134Spgoyette 			    "pg %#jx (pa %#jx): execpage cleared",
1890cb32a134Spgoyette 			    (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), 0, 0);
1891b1425120Schristos 			pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
1892b1425120Schristos 			PMAP_COUNT(exec_uncached_clear_modify);
1893b1425120Schristos 		} else {
1894b1425120Schristos 			UVMHIST_LOG(pmapexechist,
1895cb32a134Spgoyette 			    "pg %#jx (pa %#jx): syncicache performed",
1896cb32a134Spgoyette 			    (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), 0, 0);
1897b1425120Schristos 			pmap_page_syncicache(pg);
1898b1425120Schristos 			PMAP_COUNT(exec_synced_clear_modify);
1899b1425120Schristos 		}
1900b1425120Schristos 	}
1901b1425120Schristos 	if (!pmap_page_clear_attributes(mdpg, VM_PAGEMD_MODIFIED)) {
19025528d7fdSmatt 		UVMHIST_LOG(pmaphist, " <-- false", 0, 0, 0, 0);
1903b1425120Schristos 		return false;
1904b1425120Schristos 	}
1905b1425120Schristos 	if (pv->pv_pmap == NULL) {
19065528d7fdSmatt 		UVMHIST_LOG(pmaphist, " <-- true (no mappings)", 0, 0, 0, 0);
1907b1425120Schristos 		return true;
1908b1425120Schristos 	}
1909b1425120Schristos 
1910b1425120Schristos 	/*
1911b1425120Schristos 	 * Remove write access from any pages that are dirty
1912b1425120Schristos 	 * so we can tell if they are written to again later.
1913b1425120Schristos 	 * Flush the VAC first if there is one.
1914b1425120Schristos 	 */
1915b1425120Schristos 	kpreempt_disable();
19165528d7fdSmatt 	VM_PAGEMD_PVLIST_READLOCK(mdpg);
19175528d7fdSmatt 	pmap_pvlist_check(mdpg);
1918b1425120Schristos 	for (; pv != NULL; pv = pv_next) {
1919b1425120Schristos 		pmap_t pmap = pv->pv_pmap;
19205528d7fdSmatt 		vaddr_t va = trunc_page(pv->pv_va);
19215528d7fdSmatt 
19225528d7fdSmatt 		pv_next = pv->pv_next;
19235528d7fdSmatt #ifdef PMAP_VIRTUAL_CACHE_ALIASES
1924930577a5Sskrll 		if (PV_ISKENTER_P(pv))
19255528d7fdSmatt 			continue;
19265528d7fdSmatt #endif
1927b1425120Schristos 		pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
1928b1425120Schristos 		KASSERT(ptep);
19295528d7fdSmatt 		pt_entry_t pte = pte_prot_nowrite(*ptep);
19305528d7fdSmatt 		if (*ptep == pte) {
1931b1425120Schristos 			continue;
1932b1425120Schristos 		}
19335528d7fdSmatt 		KASSERT(pte_valid_p(pte));
19345528d7fdSmatt 		const uintptr_t gen = VM_PAGEMD_PVLIST_UNLOCK(mdpg);
193529807ee5Sthorpej 		pmap_tlb_miss_lock_enter();
1936e90fc54cSskrll 		pte_set(ptep, pte);
1937b1425120Schristos 		pmap_tlb_invalidate_addr(pmap, va);
193829807ee5Sthorpej 		pmap_tlb_miss_lock_exit();
1939b1425120Schristos 		pmap_update(pmap);
19405528d7fdSmatt 		if (__predict_false(gen != VM_PAGEMD_PVLIST_READLOCK(mdpg))) {
1941b1425120Schristos 			/*
1942b1425120Schristos 			 * The list changed!  So restart from the beginning.
1943b1425120Schristos 			 */
1944b1425120Schristos 			pv_next = &mdpg->mdpg_first;
19455528d7fdSmatt 			pmap_pvlist_check(mdpg);
1946b1425120Schristos 		}
1947b1425120Schristos 	}
19485528d7fdSmatt 	pmap_pvlist_check(mdpg);
1949b1425120Schristos 	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
1950b1425120Schristos 	kpreempt_enable();
1951b1425120Schristos 
19525528d7fdSmatt 	UVMHIST_LOG(pmaphist, " <-- true (mappings changed)", 0, 0, 0, 0);
1953b1425120Schristos 	return true;
1954b1425120Schristos }
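
/*
 * Illustrative effect (a sketch of the intended use, not from this
 * file): the write-protection above makes the next store to the page
 * fault, which re-sets the modified attribute, so dirtiness can be
 * sampled over an interval:
 *
 *	(void)pmap_clear_modify(pg);
 *	...
 *	if (pmap_is_modified(pg))
 *		... the page was written since the clear ...
 */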
1955b1425120Schristos 
1956b1425120Schristos /*
1957b1425120Schristos  *	pmap_is_modified:
1958b1425120Schristos  *
1959b1425120Schristos  *	Return whether or not the specified physical page is modified
1960b1425120Schristos  *	by any physical maps.
1961b1425120Schristos  */
1962b1425120Schristos bool
pmap_is_modified(struct vm_page * pg)1963b1425120Schristos pmap_is_modified(struct vm_page *pg)
1964b1425120Schristos {
1965b1425120Schristos 	return VM_PAGEMD_MODIFIED_P(VM_PAGE_TO_MD(pg));
1966b1425120Schristos }
1967b1425120Schristos 
1968b1425120Schristos /*
1969b1425120Schristos  *	pmap_set_modified:
1970b1425120Schristos  *
1971b1425120Schristos  *	Sets the page modified reference bit for the specified page.
1972b1425120Schristos  */
1973b1425120Schristos void
1974b1425120Schristos pmap_set_modified(paddr_t pa)
1975b1425120Schristos {
1976b1425120Schristos 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
1977b1425120Schristos 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
1978b1425120Schristos 	pmap_page_set_attributes(mdpg, VM_PAGEMD_MODIFIED | VM_PAGEMD_REFERENCED);
1979b1425120Schristos }
1980b1425120Schristos 
1981b1425120Schristos /******************** pv_entry management ********************/
1982b1425120Schristos 
1983b1425120Schristos static void
19845528d7fdSmatt pmap_pvlist_check(struct vm_page_md *mdpg)
1985b1425120Schristos {
19865528d7fdSmatt #ifdef DEBUG
19875528d7fdSmatt 	pv_entry_t pv = &mdpg->mdpg_first;
1988b1425120Schristos 	if (pv->pv_pmap != NULL) {
19895528d7fdSmatt #ifdef PMAP_VIRTUAL_CACHE_ALIASES
19905528d7fdSmatt 		const u_int colormask = uvmexp.colormask;
19915528d7fdSmatt 		u_int colors = 0;
19925528d7fdSmatt #endif
1993b1425120Schristos 		for (; pv != NULL; pv = pv->pv_next) {
19945528d7fdSmatt 			KASSERT(pv->pv_pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(pv->pv_va));
19955528d7fdSmatt #ifdef PMAP_VIRTUAL_CACHE_ALIASES
19965528d7fdSmatt 			colors |= __BIT(atop(pv->pv_va) & colormask);
19975528d7fdSmatt #endif
1998b1425120Schristos 		}
19995528d7fdSmatt #ifdef PMAP_VIRTUAL_CACHE_ALIASES
20003ec91037Sskrll 		// Assert that if more than one color is mapped (more than
20013ec91037Sskrll 		// one bit set in "colors"), the page is uncached.
20025528d7fdSmatt 		KASSERTMSG(!pmap_md_virtual_cache_aliasing_p()
20035528d7fdSmatt 		    || colors == 0 || (colors & (colors-1)) == 0
20045528d7fdSmatt 		    || VM_PAGEMD_UNCACHED_P(mdpg), "colors=%#x uncached=%u",
20055528d7fdSmatt 		    colors, VM_PAGEMD_UNCACHED_P(mdpg));
20065528d7fdSmatt #endif
20072a891dc6Sskrll 	} else {
20082a891dc6Sskrll 		KASSERT(pv->pv_next == NULL);
2009b1425120Schristos 	}
20105528d7fdSmatt #endif /* DEBUG */
2011b1425120Schristos }
2012b1425120Schristos 
2013b1425120Schristos /*
2014b1425120Schristos  * Enter the pmap and virtual address into the
2015b1425120Schristos  * physical to virtual map table.
2016b1425120Schristos  */
2017b1425120Schristos void
201831d27c36Sskrll pmap_enter_pv(pmap_t pmap, vaddr_t va, paddr_t pa, struct vm_page_md *mdpg,
201931d27c36Sskrll     pt_entry_t *nptep, u_int flags)
2020b1425120Schristos {
2021b1425120Schristos 	pv_entry_t pv, npv, apv;
20225528d7fdSmatt #ifdef UVMHIST
20235528d7fdSmatt 	bool first = false;
202431d27c36Sskrll 	struct vm_page *pg = VM_PAGEMD_VMPAGE_P(mdpg) ? VM_MD_TO_PAGE(mdpg) :
202531d27c36Sskrll 	    NULL;
20265528d7fdSmatt #endif
2027b1425120Schristos 
2028e4535b97Sskrll 	UVMHIST_FUNC(__func__);
2029e4535b97Sskrll 	UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx va=%#jx pg=%#jx (%#jx)",
203031d27c36Sskrll 	    (uintptr_t)pmap, va, (uintptr_t)pg, pa);
2031cb32a134Spgoyette 	UVMHIST_LOG(pmaphist, "nptep=%#jx (%#jx))",
2032cb32a134Spgoyette 	    (uintptr_t)nptep, pte_value(*nptep), 0, 0);
2033b1425120Schristos 
2034b1425120Schristos 	KASSERT(kpreempt_disabled());
2035b1425120Schristos 	KASSERT(pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(va));
20365528d7fdSmatt 	KASSERTMSG(pmap != pmap_kernel() || !pmap_md_io_vaddr_p(va),
20375528d7fdSmatt 	    "va %#"PRIxVADDR, va);
2038b1425120Schristos 
2039b1425120Schristos 	apv = NULL;
20405528d7fdSmatt 	VM_PAGEMD_PVLIST_LOCK(mdpg);
2041b1425120Schristos again:
20425528d7fdSmatt 	pv = &mdpg->mdpg_first;
20435528d7fdSmatt 	pmap_pvlist_check(mdpg);
2044b1425120Schristos 	if (pv->pv_pmap == NULL) {
2045b1425120Schristos 		KASSERT(pv->pv_next == NULL);
2046b1425120Schristos 		/*
2047b1425120Schristos 		 * No entries yet, use header as the first entry
2048b1425120Schristos 		 */
2049b1425120Schristos 		PMAP_COUNT(primary_mappings);
2050b1425120Schristos 		PMAP_COUNT(mappings);
20515528d7fdSmatt #ifdef UVMHIST
2052b1425120Schristos 		first = true;
20535528d7fdSmatt #endif
20545528d7fdSmatt #ifdef PMAP_VIRTUAL_CACHE_ALIASES
20555528d7fdSmatt 		KASSERT(VM_PAGEMD_CACHED_P(mdpg));
20575528d7fdSmatt 		// If the new mapping has an incompatible color with the
20585528d7fdSmatt 		// last mapping of this page, clean the page before using it.
20585528d7fdSmatt 		if (!PMAP_PAGE_COLOROK_P(va, pv->pv_va)) {
205931d27c36Sskrll 			pmap_md_vca_clean(mdpg, PMAP_WBINV);
20605528d7fdSmatt 		}
2061b1425120Schristos #endif
2062b1425120Schristos 		pv->pv_pmap = pmap;
20635528d7fdSmatt 		pv->pv_va = va | flags;
2064b1425120Schristos 	} else {
20655528d7fdSmatt #ifdef PMAP_VIRTUAL_CACHE_ALIASES
206631d27c36Sskrll 		if (pmap_md_vca_add(mdpg, va, nptep)) {
2067b1425120Schristos 			goto again;
20685528d7fdSmatt 		}
20695528d7fdSmatt #endif
2070b1425120Schristos 
2071b1425120Schristos 		/*
2072b1425120Schristos 		 * There is at least one other VA mapping this page.
2073b1425120Schristos 		 * Place this entry after the header.
2074b1425120Schristos 		 *
2075b1425120Schristos 		 * Note: the entry may already be in the table if
2076b1425120Schristos 		 * we are only changing the protection bits.
2077b1425120Schristos 		 */
2078b1425120Schristos 
2079b1425120Schristos 		for (npv = pv; npv; npv = npv->pv_next) {
20805528d7fdSmatt 			if (pmap == npv->pv_pmap
20815528d7fdSmatt 			    && va == trunc_page(npv->pv_va)) {
2082b1425120Schristos #ifdef PARANOIADIAG
2083b1425120Schristos 				pt_entry_t *ptep = pmap_pte_lookup(pmap, va);
20845528d7fdSmatt 				pt_entry_t pte = (ptep != NULL) ? *ptep : 0;
20855528d7fdSmatt 				if (!pte_valid_p(pte) || pte_to_paddr(pte) != pa)
20865528d7fdSmatt 					printf("%s: found va %#"PRIxVADDR
20875528d7fdSmatt 					    " pa %#"PRIxPADDR
20885528d7fdSmatt 					    " in pv_table but != %#"PRIxPTE"\n",
20895528d7fdSmatt 					    __func__, va, pa, pte_value(pte));
2090b1425120Schristos #endif
2091b1425120Schristos 				PMAP_COUNT(remappings);
2092b1425120Schristos 				VM_PAGEMD_PVLIST_UNLOCK(mdpg);
2093b1425120Schristos 				if (__predict_false(apv != NULL))
2094b1425120Schristos 					pmap_pv_free(apv);
20955528d7fdSmatt 
2096cb32a134Spgoyette 				UVMHIST_LOG(pmaphist,
2097cb32a134Spgoyette 				    " <-- done pv=%#jx (reused)",
2098cb32a134Spgoyette 				    (uintptr_t)pv, 0, 0, 0);
2099b1425120Schristos 				return;
2100b1425120Schristos 			}
2101b1425120Schristos 		}
2102b1425120Schristos 		if (__predict_true(apv == NULL)) {
2103b1425120Schristos 			/*
2104b1425120Schristos 			 * To allocate a PV we have to release the PVLIST
2105b1425120Schristos 			 * lock, so record the list generation first.  We
2106b1425120Schristos 			 * then allocate the PV and reacquire the lock.
2107b1425120Schristos 			 */
21085528d7fdSmatt 			pmap_pvlist_check(mdpg);
21095528d7fdSmatt 			const uintptr_t gen = VM_PAGEMD_PVLIST_UNLOCK(mdpg);
2110b1425120Schristos 
2111b1425120Schristos 			apv = (pv_entry_t)pmap_pv_alloc();
2112b1425120Schristos 			if (apv == NULL)
2113b1425120Schristos 				panic("pmap_enter_pv: pmap_pv_alloc() failed");
2114b1425120Schristos 
2115b1425120Schristos 			/*
2116b1425120Schristos 			 * If the generation has changed, then someone else
21175528d7fdSmatt 			 * tinkered with this page so we should start over.
2118b1425120Schristos 			 */
21195528d7fdSmatt 			if (gen != VM_PAGEMD_PVLIST_LOCK(mdpg))
2120b1425120Schristos 				goto again;
2121b1425120Schristos 		}
2122b1425120Schristos 		npv = apv;
2123b1425120Schristos 		apv = NULL;
21245528d7fdSmatt #ifdef PMAP_VIRTUAL_CACHE_ALIASES
21255528d7fdSmatt 		/*
21265528d7fdSmatt 		 * If we need to deal with virtual cache aliases, keep mappings
21275528d7fdSmatt 		 * in the kernel pmap at the head of the list.  This allows
21285528d7fdSmatt 		 * the VCA code to easily use them for cache operations if
21295528d7fdSmatt 		 * present.
21305528d7fdSmatt 		 */
21315528d7fdSmatt 		pmap_t kpmap = pmap_kernel();
21325528d7fdSmatt 		if (pmap != kpmap) {
21335528d7fdSmatt 			while (pv->pv_pmap == kpmap && pv->pv_next != NULL) {
21345528d7fdSmatt 				pv = pv->pv_next;
21355528d7fdSmatt 			}
21365528d7fdSmatt 		}
21375528d7fdSmatt #endif
21385528d7fdSmatt 		npv->pv_va = va | flags;
2139b1425120Schristos 		npv->pv_pmap = pmap;
2140b1425120Schristos 		npv->pv_next = pv->pv_next;
2141b1425120Schristos 		pv->pv_next = npv;
2142b1425120Schristos 		PMAP_COUNT(mappings);
2143b1425120Schristos 	}
21445528d7fdSmatt 	pmap_pvlist_check(mdpg);
2145b1425120Schristos 	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
2146b1425120Schristos 	if (__predict_false(apv != NULL))
2147b1425120Schristos 		pmap_pv_free(apv);
2148b1425120Schristos 
2149cb32a134Spgoyette 	UVMHIST_LOG(pmaphist, " <-- done pv=%#jx (first %ju)", (uintptr_t)pv,
2150cb32a134Spgoyette 	    first, 0, 0);
2151b1425120Schristos }
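
/*
 * The lock/generation dance above, distilled (a sketch): a path that
 * may sleep records the pvlist generation, drops the lock, and starts
 * over if the generation moved while the lock was released:
 *
 *	const uintptr_t gen = VM_PAGEMD_PVLIST_UNLOCK(mdpg);
 *	apv = (pv_entry_t)pmap_pv_alloc();	- may sleep
 *	if (gen != VM_PAGEMD_PVLIST_LOCK(mdpg))
 *		goto again;			- list changed; start over
 */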
2152b1425120Schristos 
2153b1425120Schristos /*
2154b1425120Schristos  * Remove a physical to virtual address translation.
2155b1425120Schristos  * If cache was inhibited on this page, and there are no more cache
2156b1425120Schristos  * conflicts, restore caching.
2157b1425120Schristos  * Flush the cache if the last mapping is removed (the page should
2158b1425120Schristos  * always be cached at this point).
2159b1425120Schristos  */
2160b1425120Schristos void
2161b1425120Schristos pmap_remove_pv(pmap_t pmap, vaddr_t va, struct vm_page *pg, bool dirty)
2162b1425120Schristos {
2163b1425120Schristos 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
2164b1425120Schristos 	pv_entry_t pv, npv;
2165b1425120Schristos 	bool last;
2166b1425120Schristos 
2167e4535b97Sskrll 	UVMHIST_FUNC(__func__);
2168e4535b97Sskrll 	UVMHIST_CALLARGS(pmaphist, "(pmap=%#jx, va=%#jx, pg=%#jx (pa %#jx)",
2169cb32a134Spgoyette 	    (uintptr_t)pmap, va, (uintptr_t)pg, VM_PAGE_TO_PHYS(pg));
2170cb32a134Spgoyette 	UVMHIST_LOG(pmaphist, "dirty=%ju)", dirty, 0, 0, 0);
2171b1425120Schristos 
2172b1425120Schristos 	KASSERT(kpreempt_disabled());
21735528d7fdSmatt 	KASSERT((va & PAGE_MASK) == 0);
2174b1425120Schristos 	pv = &mdpg->mdpg_first;
2175b1425120Schristos 
21765528d7fdSmatt 	VM_PAGEMD_PVLIST_LOCK(mdpg);
21775528d7fdSmatt 	pmap_pvlist_check(mdpg);
2178b1425120Schristos 
2179b1425120Schristos 	/*
2180b1425120Schristos 	 * If it is the first entry on the list, it is actually
2181b1425120Schristos 	 * in the header and we must copy the following entry up
2182b1425120Schristos 	 * to the header.  Otherwise we must search the list for
2183b1425120Schristos 	 * the entry.  In either case we free the now unused entry.
2184b1425120Schristos 	 */
2185b1425120Schristos 
2186b1425120Schristos 	last = false;
21875528d7fdSmatt 	if (pmap == pv->pv_pmap && va == trunc_page(pv->pv_va)) {
2188b1425120Schristos 		npv = pv->pv_next;
2189b1425120Schristos 		if (npv) {
2190b1425120Schristos 			*pv = *npv;
2191b1425120Schristos 			KASSERT(pv->pv_pmap != NULL);
2192b1425120Schristos 		} else {
21935528d7fdSmatt #ifdef PMAP_VIRTUAL_CACHE_ALIASES
21945528d7fdSmatt 			pmap_page_clear_attributes(mdpg, VM_PAGEMD_UNCACHED);
2195b1425120Schristos #endif
2196b1425120Schristos 			pv->pv_pmap = NULL;
2197b1425120Schristos 			last = true;	/* Last mapping removed */
2198b1425120Schristos 		}
2199b1425120Schristos 		PMAP_COUNT(remove_pvfirst);
2200b1425120Schristos 	} else {
2201b1425120Schristos 		for (npv = pv->pv_next; npv; pv = npv, npv = npv->pv_next) {
2202b1425120Schristos 			PMAP_COUNT(remove_pvsearch);
22035528d7fdSmatt 			if (pmap == npv->pv_pmap && va == trunc_page(npv->pv_va))
2204b1425120Schristos 				break;
2205b1425120Schristos 		}
2206b1425120Schristos 		if (npv) {
2207b1425120Schristos 			pv->pv_next = npv->pv_next;
2208b1425120Schristos 		}
2209b1425120Schristos 	}
2210b1425120Schristos 
22115528d7fdSmatt 	pmap_pvlist_check(mdpg);
2212b1425120Schristos 	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
2213b1425120Schristos 
22145528d7fdSmatt #ifdef PMAP_VIRTUAL_CACHE_ALIASES
22155528d7fdSmatt 	pmap_md_vca_remove(pg, va, dirty, last);
22165528d7fdSmatt #endif
22175528d7fdSmatt 
2218b1425120Schristos 	/*
2219b1425120Schristos 	 * Free the pv_entry if needed.
2220b1425120Schristos 	 */
2221b1425120Schristos 	if (npv)
2222b1425120Schristos 		pmap_pv_free(npv);
2223b1425120Schristos 	if (VM_PAGEMD_EXECPAGE_P(mdpg) && dirty) {
2224b1425120Schristos 		if (last) {
2225b1425120Schristos 			/*
2226b1425120Schristos 			 * If this was the page's last mapping, we no longer
2227b1425120Schristos 			 * care about its execness.
2228b1425120Schristos 			 */
2229b1425120Schristos 			UVMHIST_LOG(pmapexechist,
2230cb32a134Spgoyette 			    "pg %#jx (pa %#jx)last %ju: execpage cleared",
2231cb32a134Spgoyette 			    (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), last, 0);
2232b1425120Schristos 			pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
2233b1425120Schristos 			PMAP_COUNT(exec_uncached_remove);
2234b1425120Schristos 		} else {
2235b1425120Schristos 			/*
2236b1425120Schristos 			 * Someone still has it mapped as an executable page
2237b1425120Schristos 			 * so we must sync it.
2238b1425120Schristos 			 */
2239b1425120Schristos 			UVMHIST_LOG(pmapexechist,
2240cb32a134Spgoyette 			    "pg %#jx (pa %#jx) last %ju: performed syncicache",
2241cb32a134Spgoyette 			    (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), last, 0);
2242b1425120Schristos 			pmap_page_syncicache(pg);
2243b1425120Schristos 			PMAP_COUNT(exec_synced_remove);
2244b1425120Schristos 		}
2245b1425120Schristos 	}
22465528d7fdSmatt 
22475528d7fdSmatt 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
2248b1425120Schristos }
2249b1425120Schristos 
2250b1425120Schristos #if defined(MULTIPROCESSOR)
2251b1425120Schristos struct pmap_pvlist_info {
2252b1425120Schristos 	kmutex_t *pli_locks[PAGE_SIZE / 32];
2253b1425120Schristos 	volatile u_int pli_lock_refs[PAGE_SIZE / 32];
2254b1425120Schristos 	volatile u_int pli_lock_index;
2255b1425120Schristos 	u_int pli_lock_mask;
2256b1425120Schristos } pmap_pvlist_info;
2257b1425120Schristos 
2258b1425120Schristos void
2259b1425120Schristos pmap_pvlist_lock_init(size_t cache_line_size)
2260b1425120Schristos {
2261b1425120Schristos 	struct pmap_pvlist_info * const pli = &pmap_pvlist_info;
2262b1425120Schristos 	const vaddr_t lock_page = uvm_pageboot_alloc(PAGE_SIZE);
2263b1425120Schristos 	vaddr_t lock_va = lock_page;
2264b1425120Schristos 	if (sizeof(kmutex_t) > cache_line_size) {
2265b1425120Schristos 		cache_line_size = roundup2(sizeof(kmutex_t), cache_line_size);
2266b1425120Schristos 	}
2267b1425120Schristos 	const size_t nlocks = PAGE_SIZE / cache_line_size;
2268b1425120Schristos 	KASSERT((nlocks & (nlocks - 1)) == 0);
2269b1425120Schristos 	/*
2270b1425120Schristos 	 * Now divide the page into a number of mutexes, one per cacheline.
2271b1425120Schristos 	 */
2272b1425120Schristos 	for (size_t i = 0; i < nlocks; lock_va += cache_line_size, i++) {
2273b1425120Schristos 		kmutex_t * const lock = (kmutex_t *)lock_va;
22745528d7fdSmatt 		mutex_init(lock, MUTEX_DEFAULT, IPL_HIGH);
2275b1425120Schristos 		pli->pli_locks[i] = lock;
2276b1425120Schristos 	}
2277b1425120Schristos 	pli->pli_lock_mask = nlocks - 1;
2278b1425120Schristos }
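
/*
 * Worked example (assuming PAGE_SIZE is 4096, a 64-byte cache line,
 * and sizeof(kmutex_t) <= 64): nlocks = 4096 / 64 = 64 mutexes carved
 * from the boot-allocated page, pli_lock_mask = 63, so lock selection
 * in pmap_pvlist_lock_addr() reduces to (locknum & 63).
 */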
2279b1425120Schristos 
22805528d7fdSmatt kmutex_t *
22815528d7fdSmatt pmap_pvlist_lock_addr(struct vm_page_md *mdpg)
2282b1425120Schristos {
2283b1425120Schristos 	struct pmap_pvlist_info * const pli = &pmap_pvlist_info;
2284b1425120Schristos 	kmutex_t *lock = mdpg->mdpg_lock;
2285b1425120Schristos 
2286b1425120Schristos 	/*
2287b1425120Schristos 	 * Allocate a lock on an as-needed basis.  This will hopefully give us
2288b1425120Schristos 	 * a semi-random distribution that is not based on page color.
2289b1425120Schristos 	 */
2290b1425120Schristos 	if (__predict_false(lock == NULL)) {
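		/*
		 * The stride 37 is odd and therefore coprime with the
		 * power-of-two lock count, so successive allocations
		 * cycle through every lock slot.
		 */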
2291b1425120Schristos 		size_t locknum = atomic_add_int_nv(&pli->pli_lock_index, 37);
2292b1425120Schristos 		size_t lockid = locknum & pli->pli_lock_mask;
2293b1425120Schristos 		kmutex_t * const new_lock = pli->pli_locks[lockid];
2294b1425120Schristos 		/*
2295b1425120Schristos 		 * Set the lock.  If some other thread already did, just use
2296b1425120Schristos 		 * the one they assigned.
2297b1425120Schristos 		 */
2298b1425120Schristos 		lock = atomic_cas_ptr(&mdpg->mdpg_lock, NULL, new_lock);
2299b1425120Schristos 		if (lock == NULL) {
2300b1425120Schristos 			lock = new_lock;
2301b1425120Schristos 			atomic_inc_uint(&pli->pli_lock_refs[lockid]);
2302b1425120Schristos 		}
2303b1425120Schristos 	}
2304b1425120Schristos 
2305b1425120Schristos 	/*
23065528d7fdSmatt 	 * Now finally provide the lock.
2307b1425120Schristos 	 */
23085528d7fdSmatt 	return lock;
2309b1425120Schristos }
2310b1425120Schristos #else /* !MULTIPROCESSOR */
2311b1425120Schristos void
2312b1425120Schristos pmap_pvlist_lock_init(size_t cache_line_size)
2313b1425120Schristos {
23145528d7fdSmatt 	mutex_init(&pmap_pvlist_mutex, MUTEX_DEFAULT, IPL_HIGH);
2315b1425120Schristos }
2316b1425120Schristos 
2317b1425120Schristos #ifdef MODULAR
23185528d7fdSmatt kmutex_t *
23195528d7fdSmatt pmap_pvlist_lock_addr(struct vm_page_md *mdpg)
2320b1425120Schristos {
2321b1425120Schristos 	/*
2322b1425120Schristos 	 * We just use a global lock.
2323b1425120Schristos 	 */
2324b1425120Schristos 	if (__predict_false(mdpg->mdpg_lock == NULL)) {
2325b1425120Schristos 		mdpg->mdpg_lock = &pmap_pvlist_mutex;
2326b1425120Schristos 	}
2327b1425120Schristos 
2328b1425120Schristos 	/*
23295528d7fdSmatt 	 * Now finally provide the lock.
2330b1425120Schristos 	 */
23315528d7fdSmatt 	return mdpg->mdpg_lock;
2332b1425120Schristos }
2333b1425120Schristos #endif /* MODULAR */
2334b1425120Schristos #endif /* !MULTIPROCESSOR */
2335b1425120Schristos 
2336b1425120Schristos /*
2337b1425120Schristos  * pmap_pv_page_alloc:
2338b1425120Schristos  *
2339b1425120Schristos  *	Allocate a page for the pv_entry pool.
2340b1425120Schristos  */
2341b1425120Schristos void *
2342b1425120Schristos pmap_pv_page_alloc(struct pool *pp, int flags)
2343b1425120Schristos {
2344196ee94dSskrll 	struct vm_page * const pg = pmap_md_alloc_poolpage(UVM_PGA_USERESERVE);
2345b1425120Schristos 	if (pg == NULL)
2346b1425120Schristos 		return NULL;
2347b1425120Schristos 
2348196ee94dSskrll 	return (void *)pmap_md_map_poolpage(VM_PAGE_TO_PHYS(pg), PAGE_SIZE);
2349b1425120Schristos }
2350b1425120Schristos 
2351b1425120Schristos /*
2352b1425120Schristos  * pmap_pv_page_free:
2353b1425120Schristos  *
2354b1425120Schristos  *	Free a pv_entry pool page.
2355b1425120Schristos  */
2356b1425120Schristos void
2357b1425120Schristos pmap_pv_page_free(struct pool *pp, void *v)
2358b1425120Schristos {
2359b1425120Schristos 	vaddr_t va = (vaddr_t)v;
2360b1425120Schristos 
2361b1425120Schristos 	KASSERT(pmap_md_direct_mapped_vaddr_p(va));
2362b1425120Schristos 	const paddr_t pa = pmap_md_direct_mapped_vaddr_to_paddr(va);
2363b1425120Schristos 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
23645528d7fdSmatt 	KASSERT(pg != NULL);
23655528d7fdSmatt #ifdef PMAP_VIRTUAL_CACHE_ALIASES
23665528d7fdSmatt 	kpreempt_disable();
23675528d7fdSmatt 	pmap_md_vca_remove(pg, va, true, true);
23685528d7fdSmatt 	kpreempt_enable();
23695528d7fdSmatt #endif
23705528d7fdSmatt 	pmap_page_clear_attributes(VM_PAGE_TO_MD(pg), VM_PAGEMD_POOLPAGE);
23712a891dc6Sskrll 	KASSERT(!VM_PAGEMD_EXECPAGE_P(VM_PAGE_TO_MD(pg)));
2372b1425120Schristos 	uvm_pagefree(pg);
2373b1425120Schristos }
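/*
 * A sketch of how the two routines above back the pv_entry pool; the
 * pool_allocator instance shown is illustrative, not necessarily the
 * one this file defines:
 *
 *	static struct pool_allocator pmap_pv_page_allocator = {
 *		.pa_alloc = pmap_pv_page_alloc,
 *		.pa_free = pmap_pv_page_free,
 *		.pa_pagesz = PAGE_SIZE,
 *	};
 */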
2374b1425120Schristos 
2375b1425120Schristos #ifdef PMAP_PREFER
2376b1425120Schristos /*
2377b1425120Schristos  * Find first virtual address >= *vap that doesn't cause
2378b1425120Schristos  * a cache alias conflict.
2379b1425120Schristos  */
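/*
 * Worked example (hypothetical 16 KB alias boundary, prefer_mask
 * 0x3fff): with foff 0x5000 and *vap 0x1000, d = 0x4000 & 0x3fff = 0
 * and the hint is already alias-free.  With *vap 0x2000 instead,
 * d = 0x3000; searching upward (td == 0) gives *vap = 0x5000, which
 * is congruent to foff modulo the alias size.
 */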
2380b1425120Schristos void
2381b1425120Schristos pmap_prefer(vaddr_t foff, vaddr_t *vap, vsize_t sz, int td)
2382b1425120Schristos {
2383b1425120Schristos 	vsize_t prefer_mask = ptoa(uvmexp.colormask);
2384b1425120Schristos 
2385b1425120Schristos 	PMAP_COUNT(prefer_requests);
2386b1425120Schristos 
2387b1425120Schristos 	prefer_mask |= pmap_md_cache_prefer_mask();
2388b1425120Schristos 
2389b1425120Schristos 	if (prefer_mask) {
23905528d7fdSmatt 		vaddr_t	va = *vap;
23915528d7fdSmatt 		vsize_t d = (foff - va) & prefer_mask;
2392b1425120Schristos 		if (d) {
2393b1425120Schristos 			if (td)
2394b1425120Schristos 				*vap = trunc_page(va - ((-d) & prefer_mask));
2395b1425120Schristos 			else
2396b1425120Schristos 				*vap = round_page(va + d);
2397b1425120Schristos 			PMAP_COUNT(prefer_adjustments);
2398b1425120Schristos 		}
2399b1425120Schristos 	}
2400b1425120Schristos }
2401b1425120Schristos #endif /* PMAP_PREFER */
2402b1425120Schristos 
2403b1425120Schristos #ifdef PMAP_MAP_POOLPAGE
2404b1425120Schristos vaddr_t
2405b1425120Schristos pmap_map_poolpage(paddr_t pa)
2406b1425120Schristos {
2407b1425120Schristos 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
2408b1425120Schristos 	KASSERT(pg);
24092a891dc6Sskrll 
2410b1425120Schristos 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
24112a891dc6Sskrll 	KASSERT(!VM_PAGEMD_EXECPAGE_P(mdpg));
24122a891dc6Sskrll 
2413b1425120Schristos 	pmap_page_set_attributes(mdpg, VM_PAGEMD_POOLPAGE);
2414b1425120Schristos 
24155528d7fdSmatt 	return pmap_md_map_poolpage(pa, NBPG);
2416b1425120Schristos }
2417b1425120Schristos 
2418b1425120Schristos paddr_t
2419b1425120Schristos pmap_unmap_poolpage(vaddr_t va)
2420b1425120Schristos {
2421b1425120Schristos 	KASSERT(pmap_md_direct_mapped_vaddr_p(va));
2422b1425120Schristos 	paddr_t pa = pmap_md_direct_mapped_vaddr_to_paddr(va);
2423b1425120Schristos 
2424b1425120Schristos 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
24255528d7fdSmatt 	KASSERT(pg != NULL);
24262a891dc6Sskrll 	KASSERT(!VM_PAGEMD_EXECPAGE_P(VM_PAGE_TO_MD(pg)));
24272a891dc6Sskrll 
24285528d7fdSmatt 	pmap_page_clear_attributes(VM_PAGE_TO_MD(pg), VM_PAGEMD_POOLPAGE);
2429b1425120Schristos 	pmap_md_unmap_poolpage(va, NBPG);
2430b1425120Schristos 
2431b1425120Schristos 	return pa;
2432b1425120Schristos }
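/*
 * The two routines above are meant to be paired; a sketch of the round
 * trip on a direct-mapped architecture (hypothetical caller):
 *
 *	vaddr_t va = pmap_map_poolpage(pa);	// sets VM_PAGEMD_POOLPAGE
 *	...use the page through va...
 *	paddr_t pa2 = pmap_unmap_poolpage(va);	// clears it; pa2 == pa
 */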
2433b1425120Schristos #endif /* PMAP_MAP_POOLPAGE */
2434196ee94dSskrll 
2435196ee94dSskrll #ifdef DDB
2436196ee94dSskrll void
2437196ee94dSskrll pmap_db_mdpg_print(struct vm_page *pg, void (*pr)(const char *, ...) __printflike(1, 2))
2438196ee94dSskrll {
2439196ee94dSskrll 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
2440196ee94dSskrll 	pv_entry_t pv = &mdpg->mdpg_first;
2441196ee94dSskrll 
2442196ee94dSskrll 	if (pv->pv_pmap == NULL) {
2443196ee94dSskrll 		pr(" no mappings\n");
2444196ee94dSskrll 		return;
2445196ee94dSskrll 	}
2446196ee94dSskrll 
2447196ee94dSskrll 	int lcount = 0;
2448196ee94dSskrll 	if (VM_PAGEMD_VMPAGE_P(mdpg)) {
2449196ee94dSskrll 		pr(" vmpage");
2450196ee94dSskrll 		lcount++;
2451196ee94dSskrll 	}
2452196ee94dSskrll 	if (VM_PAGEMD_POOLPAGE_P(mdpg)) {
2453196ee94dSskrll 		if (lcount != 0)
2454196ee94dSskrll 			pr(",");
2455196ee94dSskrll 		pr(" pool");
2456196ee94dSskrll 		lcount++;
2457196ee94dSskrll 	}
2458196ee94dSskrll #ifdef PMAP_VIRTUAL_CACHE_ALIASES
2459196ee94dSskrll 	if (VM_PAGEMD_UNCACHED_P(mdpg)) {
2460196ee94dSskrll 		if (lcount != 0)
2461196ee94dSskrll 			pr(",");
2462196ee94dSskrll 		pr(" uncached");
2463196ee94dSskrll 	}
2464196ee94dSskrll #endif
2465196ee94dSskrll 	pr("\n");
2466196ee94dSskrll 
2467196ee94dSskrll 	lcount = 0;
2468196ee94dSskrll 	if (VM_PAGEMD_REFERENCED_P(mdpg)) {
2469196ee94dSskrll 		pr(" referenced");
2470196ee94dSskrll 		lcount++;
2471196ee94dSskrll 	}
2472196ee94dSskrll 	if (VM_PAGEMD_MODIFIED_P(mdpg)) {
2473196ee94dSskrll 		if (lcount != 0)
2474196ee94dSskrll 			pr(",");
2475196ee94dSskrll 		pr(" modified");
2476196ee94dSskrll 		lcount++;
2477196ee94dSskrll 	}
2478196ee94dSskrll 	if (VM_PAGEMD_EXECPAGE_P(mdpg)) {
2479196ee94dSskrll 		if (lcount != 0)
2480196ee94dSskrll 			pr(",");
2481196ee94dSskrll 		pr(" exec");
2482196ee94dSskrll 		lcount++;
2483196ee94dSskrll 	}
2484196ee94dSskrll 	pr("\n");
2485196ee94dSskrll 
2486196ee94dSskrll 	for (size_t i = 0; pv != NULL; pv = pv->pv_next) {
2487196ee94dSskrll 		pr("  pv[%zu] pv=%p\n", i, pv);
2488196ee94dSskrll 		pr("    pv[%zu].pv_pmap = %p\n", i, pv->pv_pmap);
2489196ee94dSskrll 		pr("    pv[%zu].pv_va   = %" PRIxVADDR " (kenter=%s)\n",
2490196ee94dSskrll 		    i, trunc_page(pv->pv_va), PV_ISKENTER_P(pv) ? "true" : "false");
2491196ee94dSskrll 		i++;
2492196ee94dSskrll 	}
2493196ee94dSskrll }
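/*
 * Hypothetical output of the above for a vm-managed page with a single
 * kernel-entered mapping (addresses are placeholders):
 *
 *	 vmpage
 *	 referenced, modified
 *	  pv[0] pv=0x...
 *	    pv[0].pv_pmap = 0x...
 *	    pv[0].pv_va   = ffffffc000400000 (kenter=true)
 */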
2494196ee94dSskrll 
2495196ee94dSskrll void
2496196ee94dSskrll pmap_db_pmap_print(struct pmap *pm,
2497196ee94dSskrll     void (*pr)(const char *, ...) __printflike(1, 2))
2498196ee94dSskrll {
2499196ee94dSskrll #if defined(PMAP_HWPAGEWALKER)
2500196ee94dSskrll 	pr(" pm_pdetab     = %p\n", pm->pm_pdetab);
2501196ee94dSskrll #endif
2502196ee94dSskrll #if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
2503196ee94dSskrll 	pr(" pm_segtab     = %p\n", pm->pm_segtab);
2504196ee94dSskrll #endif
2505196ee94dSskrll 
2506196ee94dSskrll 	pmap_db_tlb_print(pm, pr);
2507196ee94dSskrll }
2508196ee94dSskrll #endif /* DDB */
2509