1 /*	$NetBSD: pmap.c,v 1.13 2015/11/05 00:12:28 pgoyette Exp $	*/
2 
3 /*-
4  * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9  * NASA Ames Research Center and by Chris G. Demetriou.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /*
34  * Copyright (c) 1992, 1993
35  *	The Regents of the University of California.  All rights reserved.
36  *
37  * This code is derived from software contributed to Berkeley by
38  * the Systems Programming Group of the University of Utah Computer
39  * Science Department and Ralph Campbell.
40  *
41  * Redistribution and use in source and binary forms, with or without
42  * modification, are permitted provided that the following conditions
43  * are met:
44  * 1. Redistributions of source code must retain the above copyright
45  *    notice, this list of conditions and the following disclaimer.
46  * 2. Redistributions in binary form must reproduce the above copyright
47  *    notice, this list of conditions and the following disclaimer in the
48  *    documentation and/or other materials provided with the distribution.
49  * 3. Neither the name of the University nor the names of its contributors
50  *    may be used to endorse or promote products derived from this software
51  *    without specific prior written permission.
52  *
53  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63  * SUCH DAMAGE.
64  *
65  *	@(#)pmap.c	8.4 (Berkeley) 1/26/94
66  */
67 
68 #include <sys/cdefs.h>
69 
70 __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.13 2015/11/05 00:12:28 pgoyette Exp $");
71 
72 /*
73  *	Manages physical address maps.
74  *
75  *	In addition to hardware address maps, this
76  *	module is called upon to provide software-use-only
77  *	maps which may or may not be stored in the same
78  *	form as hardware maps.  These pseudo-maps are
79  *	used to store intermediate results from copy
80  *	operations to and from address spaces.
81  *
82  *	Since the information managed by this module is
83  *	also stored by the logical address mapping module,
84  *	this module may throw away valid virtual-to-physical
85  *	mappings at almost any time.  However, invalidations
86  *	of virtual-to-physical mappings must be done as
87  *	requested.
88  *
89  *	In order to cope with hardware architectures which
90  *	make virtual-to-physical map invalidates expensive,
91  *	this module may delay invalidation or protection-reduction
92  *	operations until they are actually necessary.  This module
93  *	is given full information as to which processors are
94  *	currently using which maps, and when physical maps must be
95  *	made correct.
96  */
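
/*
 * A sketch of the typical call sequence from the machine-independent
 * VM code (illustrative only, not code from this file):
 *
 *	error = pmap_enter(map->pmap, va, pa,
 *	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_WRITE);
 *	...
 *	pmap_remove(map->pmap, va, va + PAGE_SIZE);
 *	pmap_update(map->pmap);		(commit any deferred TLB work)
 */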
97 
98 #include "opt_modular.h"
99 #include "opt_multiprocessor.h"
100 #include "opt_sysv.h"
101 
102 #define __PMAP_PRIVATE
103 
104 #include <sys/param.h>
105 #include <sys/systm.h>
106 #include <sys/proc.h>
107 #include <sys/buf.h>
108 #include <sys/pool.h>
109 #include <sys/atomic.h>
110 #include <sys/mutex.h>
112 #include <sys/socketvar.h>	/* XXX: for sock_loan_thresh */
113 
114 #include <uvm/uvm.h>
115 
116 #define	PMAP_COUNT(name)	(pmap_evcnt_##name.ev_count++ + 0)
117 #define PMAP_COUNTER(name, desc) \
118 static struct evcnt pmap_evcnt_##name = \
119 	EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", desc); \
120 EVCNT_ATTACH_STATIC(pmap_evcnt_##name)
121 
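/*
 * The counters below are ordinary evcnt(9) event counters; on a
 * running kernel they can be inspected with "vmstat -e" (grouped
 * under the "pmap" name given above).
 */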
122 PMAP_COUNTER(remove_kernel_calls, "remove kernel calls");
123 PMAP_COUNTER(remove_kernel_pages, "kernel pages unmapped");
124 PMAP_COUNTER(remove_user_calls, "remove user calls");
125 PMAP_COUNTER(remove_user_pages, "user pages unmapped");
126 PMAP_COUNTER(remove_flushes, "remove cache flushes");
127 PMAP_COUNTER(remove_tlb_ops, "remove tlb ops");
128 PMAP_COUNTER(remove_pvfirst, "remove pv first");
129 PMAP_COUNTER(remove_pvsearch, "remove pv search");
130 
131 PMAP_COUNTER(prefer_requests, "prefer requests");
132 PMAP_COUNTER(prefer_adjustments, "prefer adjustments");
133 
134 PMAP_COUNTER(idlezeroed_pages, "pages idle zeroed");
135 PMAP_COUNTER(zeroed_pages, "pages zeroed");
136 PMAP_COUNTER(copied_pages, "pages copied");
137 
138 PMAP_COUNTER(kenter_pa, "kernel fast mapped pages");
139 PMAP_COUNTER(kenter_pa_bad, "kernel fast mapped pages (bad color)");
140 PMAP_COUNTER(kenter_pa_unmanaged, "kernel fast mapped unmanaged pages");
141 PMAP_COUNTER(kremove_pages, "kernel fast unmapped pages");
142 
143 PMAP_COUNTER(page_cache_evictions, "pages changed to uncacheable");
144 PMAP_COUNTER(page_cache_restorations, "pages changed to cacheable");
145 
146 PMAP_COUNTER(kernel_mappings_bad, "kernel pages mapped (bad color)");
147 PMAP_COUNTER(user_mappings_bad, "user pages mapped (bad color)");
148 PMAP_COUNTER(kernel_mappings, "kernel pages mapped");
149 PMAP_COUNTER(user_mappings, "user pages mapped");
150 PMAP_COUNTER(user_mappings_changed, "user mapping changed");
151 PMAP_COUNTER(kernel_mappings_changed, "kernel mapping changed");
152 PMAP_COUNTER(uncached_mappings, "uncached pages mapped");
153 PMAP_COUNTER(unmanaged_mappings, "unmanaged pages mapped");
154 PMAP_COUNTER(managed_mappings, "managed pages mapped");
155 PMAP_COUNTER(mappings, "pages mapped");
156 PMAP_COUNTER(remappings, "pages remapped");
157 PMAP_COUNTER(unmappings, "pages unmapped");
158 PMAP_COUNTER(primary_mappings, "page initial mappings");
159 PMAP_COUNTER(primary_unmappings, "page final unmappings");
160 PMAP_COUNTER(tlb_hit, "page mapping");
161 
162 PMAP_COUNTER(exec_mappings, "exec pages mapped");
163 PMAP_COUNTER(exec_synced_mappings, "exec pages synced");
164 PMAP_COUNTER(exec_synced_remove, "exec pages synced (PR)");
165 PMAP_COUNTER(exec_synced_clear_modify, "exec pages synced (CM)");
166 PMAP_COUNTER(exec_synced_page_protect, "exec pages synced (PP)");
167 PMAP_COUNTER(exec_synced_protect, "exec pages synced (P)");
168 PMAP_COUNTER(exec_uncached_page_protect, "exec pages uncached (PP)");
169 PMAP_COUNTER(exec_uncached_clear_modify, "exec pages uncached (CM)");
170 PMAP_COUNTER(exec_uncached_zero_page, "exec pages uncached (ZP)");
171 PMAP_COUNTER(exec_uncached_copy_page, "exec pages uncached (CP)");
172 PMAP_COUNTER(exec_uncached_remove, "exec pages uncached (PR)");
173 
174 PMAP_COUNTER(create, "creates");
175 PMAP_COUNTER(reference, "references");
176 PMAP_COUNTER(dereference, "dereferences");
177 PMAP_COUNTER(destroy, "destroyed");
178 PMAP_COUNTER(activate, "activations");
179 PMAP_COUNTER(deactivate, "deactivations");
180 PMAP_COUNTER(update, "updates");
181 #ifdef MULTIPROCESSOR
182 PMAP_COUNTER(shootdown_ipis, "shootdown IPIs");
183 #endif
184 PMAP_COUNTER(unwire, "unwires");
185 PMAP_COUNTER(copy, "copies");
186 PMAP_COUNTER(clear_modify, "clear_modifies");
187 PMAP_COUNTER(protect, "protects");
188 PMAP_COUNTER(page_protect, "page_protects");
189 
190 #define PMAP_ASID_RESERVED 0
191 CTASSERT(PMAP_ASID_RESERVED == 0);
192 
193 /*
194  * Initialize the kernel pmap.
195  */
196 #ifdef MULTIPROCESSOR
197 #define	PMAP_SIZE	offsetof(struct pmap, pm_pai[PMAP_TLB_MAX])
198 #else
199 #define	PMAP_SIZE	sizeof(struct pmap)
200 kmutex_t pmap_pvlist_mutex __aligned(COHERENCY_UNIT);
201 #endif
202 
203 struct pmap_kernel kernel_pmap_store = {
204 	.kernel_pmap = {
205 		.pm_count = 1,
206 		.pm_segtab = PMAP_INVALID_SEGTAB_ADDRESS,
207 		.pm_minaddr = VM_MIN_KERNEL_ADDRESS,
208 		.pm_maxaddr = VM_MAX_KERNEL_ADDRESS,
209 	},
210 };
211 
212 struct pmap * const kernel_pmap_ptr = &kernel_pmap_store.kernel_pmap;
213 
214 struct pmap_limits pmap_limits = {
215 	.virtual_start = VM_MIN_KERNEL_ADDRESS,
216 };
217 
218 #ifdef UVMHIST
219 static struct kern_history_ent pmapexechistbuf[10000];
220 static struct kern_history_ent pmaphistbuf[10000];
221 UVMHIST_DEFINE(pmapexechist);
222 UVMHIST_DEFINE(pmaphist);
223 #endif
224 
225 /*
226  * The pools from which pmap structures and sub-structures are allocated.
227  */
228 struct pool pmap_pmap_pool;
229 struct pool pmap_pv_pool;
230 
231 #ifndef PMAP_PV_LOWAT
232 #define	PMAP_PV_LOWAT	16
233 #endif
234 int		pmap_pv_lowat = PMAP_PV_LOWAT;
235 
236 bool		pmap_initialized = false;
237 #define	PMAP_PAGE_COLOROK_P(a, b) \
238 		((((int)(a) ^ (int)(b)) & pmap_page_colormask) == 0)
239 u_int		pmap_page_colormask;
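
/*
 * Worked example with hypothetical values: if the cache needs two
 * colour bits above a 4KB page offset, pmap_page_colormask is 0x3000.
 * Then pa == 0x12345000 has colour 0x1000; mapping it at va == 0x87000
 * (colour 0x3000) would be a bad colour, while va == 0x85000
 * (colour 0x1000) would be fine.
 */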
240 
241 #define PAGE_IS_MANAGED(pa)	\
242 	(pmap_initialized == true && vm_physseg_find(atop(pa), NULL) != -1)
243 
244 #define PMAP_IS_ACTIVE(pm)						\
245 	((pm) == pmap_kernel() || 					\
246 	 (pm) == curlwp->l_proc->p_vmspace->vm_map.pmap)
247 
248 /* Forward function declarations */
249 void pmap_remove_pv(pmap_t, vaddr_t, struct vm_page *, bool);
250 void pmap_enter_pv(pmap_t, vaddr_t, struct vm_page *, u_int *);
251 
252 /*
253  * PV table management functions.
254  */
255 void	*pmap_pv_page_alloc(struct pool *, int);
256 void	pmap_pv_page_free(struct pool *, void *);
257 
258 struct pool_allocator pmap_pv_page_allocator = {
259 	pmap_pv_page_alloc, pmap_pv_page_free, 0,
260 };
261 
262 #define	pmap_pv_alloc()		pool_get(&pmap_pv_pool, PR_NOWAIT)
263 #define	pmap_pv_free(pv)	pool_put(&pmap_pv_pool, (pv))
264 
265 #if !defined(MULTIPROCESSOR) || !defined(PMAP_MD_NEED_TLB_MISS_LOCK)
266 #define	pmap_md_tlb_miss_lock_enter()	do { } while(/*CONSTCOND*/0)
267 #define	pmap_md_tlb_miss_lock_exit()	do { } while(/*CONSTCOND*/0)
268 #endif	/* !MULTIPROCESSOR || !PMAP_MD_NEED_TLB_MISS_LOCK */
269 
270 /*
271  * Misc. functions.
272  */
273 
274 bool
275 pmap_page_clear_attributes(struct vm_page_md *mdpg, u_int clear_attributes)
276 {
277 	volatile u_int * const attrp = &mdpg->mdpg_attrs;
278 #ifdef MULTIPROCESSOR
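	/*
	 * Clear the bits with a compare-and-swap loop so that a racing
	 * update of mdpg_attrs on another CPU is never lost; retry until
	 * the CAS succeeds or the bits are already clear.
	 */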
279 	for (;;) {
280 		u_int old_attr = *attrp;
281 		if ((old_attr & clear_attributes) == 0)
282 			return false;
283 		u_int new_attr = old_attr & ~clear_attributes;
284 		if (old_attr == atomic_cas_uint(attrp, old_attr, new_attr))
285 			return true;
286 	}
287 #else
288 	u_int old_attr = *attrp;
289 	if ((old_attr & clear_attributes) == 0)
290 		return false;
291 	*attrp &= ~clear_attributes;
292 	return true;
293 #endif
294 }
295 
296 void
297 pmap_page_set_attributes(struct vm_page_md *mdpg, u_int set_attributes)
298 {
299 #ifdef MULTIPROCESSOR
300 	atomic_or_uint(&mdpg->mdpg_attrs, set_attributes);
301 #else
302 	mdpg->mdpg_attrs |= set_attributes;
303 #endif
304 }
305 
306 static void
307 pmap_page_syncicache(struct vm_page *pg)
308 {
309 #ifndef MULTIPROCESSOR
310 	struct pmap * const curpmap = curcpu()->ci_curpm;
311 #endif
312 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
313 	pv_entry_t pv = &mdpg->mdpg_first;
314 	kcpuset_t *onproc;
315 #ifdef MULTIPROCESSOR
316 	kcpuset_create(&onproc, true);
317 #else
318 	onproc = NULL;
319 #endif
320 	(void)VM_PAGEMD_PVLIST_LOCK(mdpg, false);
321 
322 	if (pv->pv_pmap != NULL) {
323 		for (; pv != NULL; pv = pv->pv_next) {
324 #ifdef MULTIPROCESSOR
325 			kcpuset_merge(onproc, pv->pv_pmap->pm_onproc);
326 			if (kcpuset_match(onproc, kcpuset_running)) {
327 				break;
328 			}
329 #else
330 			if (pv->pv_pmap == curpmap) {
331 				onproc = curcpu()->ci_data.cpu_kcpuset;
332 				break;
333 			}
334 #endif
335 		}
336 	}
337 	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
338 	kpreempt_disable();
339 	pmap_md_page_syncicache(pg, onproc);
340 #ifdef MULTIPROCESSOR
341 	kcpuset_destroy(onproc);
342 #endif
343 	kpreempt_enable();
344 }
345 
346 /*
347  * Define the initial bounds of the kernel virtual address space.
348  */
349 void
350 pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp)
351 {
352 
353 	*vstartp = pmap_limits.virtual_start;
354 	*vendp = pmap_limits.virtual_end;
355 }
356 
357 vaddr_t
358 pmap_growkernel(vaddr_t maxkvaddr)
359 {
360 	vaddr_t virtual_end = pmap_limits.virtual_end;
361 	maxkvaddr = pmap_round_seg(maxkvaddr) - 1;
362 
363 	/*
364 	 * Reserve PTEs for the new KVA space.
365 	 */
366 	for (; virtual_end < maxkvaddr; virtual_end += NBSEG) {
367 		pmap_pte_reserve(pmap_kernel(), virtual_end, 0);
368 	}
369 
370 	/*
371 	 * Don't exceed VM_MAX_KERNEL_ADDRESS!
372 	 */
373 	if (virtual_end == 0 || virtual_end > VM_MAX_KERNEL_ADDRESS)
374 		virtual_end = VM_MAX_KERNEL_ADDRESS;
375 
376 	/*
377 	 * Update new end.
378 	 */
379 	pmap_limits.virtual_end = virtual_end;
380 	return virtual_end;
381 }
382 
383 /*
384  * Bootstrap memory allocator (alternative to vm_bootstrap_steal_memory()).
385  * This function allows for early dynamic memory allocation until the virtual
386  * memory system has been bootstrapped.  After that point, either kmem_alloc
387  * or malloc should be used.  This function works by stealing pages from the
388  * (to be) managed page pool, then implicitly mapping the pages (by using
389  * their k0seg addresses) and zeroing them.
390  *
391  * It may be used once the physical memory segments have been pre-loaded
392  * into the vm_physmem[] array.  Early memory allocation MUST use this
393  * interface!  This cannot be used after uvm_page_init(), and will
394  * generate a panic if tried.
395  *
396  * Note that this memory will never be freed, and in essence it is wired
397  * down.
398  *
399  * We must adjust *vstartp and/or *vendp iff we use address space
400  * from the kernel virtual address range defined by pmap_virtual_space().
401  */
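/*
 * Example (sketch): machine-dependent bootstrap code might allocate the
 * kernel message buffer this way before uvm_init() runs (the names used
 * below are illustrative, not defined in this file):
 *
 *	msgbufaddr = (void *)pmap_steal_memory(MSGBUFSIZE, NULL, NULL);
 *
 * This implementation never touches vstartp/vendp, so NULL is tolerated
 * here; portable callers normally pass real pointers.
 */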
402 vaddr_t
403 pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp)
404 {
405 	u_int npgs;
406 	paddr_t pa;
407 	vaddr_t va;
408 
409 	size = round_page(size);
410 	npgs = atop(size);
411 
412 	for (u_int bank = 0; bank < vm_nphysseg; bank++) {
413 		struct vm_physseg * const seg = VM_PHYSMEM_PTR(bank);
414 		if (uvm.page_init_done == true)
415 			panic("pmap_steal_memory: called _after_ bootstrap");
416 
417 		if (seg->avail_start != seg->start ||
418 		    seg->avail_start >= seg->avail_end)
419 			continue;
420 
421 		if ((seg->avail_end - seg->avail_start) < npgs)
422 			continue;
423 
424 		/*
425 		 * There are enough pages here; steal them!
426 		 */
427 		pa = ptoa(seg->avail_start);
428 		seg->avail_start += npgs;
429 		seg->start += npgs;
430 
431 		/*
432 		 * Have we used up this segment?
433 		 */
434 		if (seg->avail_start == seg->end) {
435 			if (vm_nphysseg == 1)
436 				panic("pmap_steal_memory: out of memory!");
437 
438 			/* Remove this segment from the list. */
439 			vm_nphysseg--;
440 			if (bank < vm_nphysseg)
441 				memmove(seg, seg+1,
442 				    sizeof(*seg) * (vm_nphysseg - bank));
443 		}
444 
445 		va = pmap_md_map_poolpage(pa, size);
446 		memset((void *)va, 0, size);
447 		return va;
448 	}
449 
450 	/*
451 	 * If we got here, there was no memory left.
452 	 */
453 	panic("pmap_steal_memory: no memory to steal");
454 }
455 
456 /*
457  *	Initialize the pmap module.
458  *	Called by vm_init, to initialize any structures that the pmap
459  *	system needs to map virtual memory.
460  */
461 void
462 pmap_init(void)
463 {
464 	UVMHIST_INIT_STATIC(pmapexechist, pmapexechistbuf);
465 	UVMHIST_INIT_STATIC(pmaphist, pmaphistbuf);
466 
467 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
468 
469 	/*
470 	 * Initialize the segtab lock.
471 	 */
472 	mutex_init(&pmap_segtab_lock, MUTEX_DEFAULT, IPL_HIGH);
473 
474 	/*
475 	 * Set a low water mark on the pv_entry pool, so that we are
476 	 * more likely to have these around even in extreme memory
477 	 * starvation.
478 	 */
479 	pool_setlowat(&pmap_pv_pool, pmap_pv_lowat);
480 
481 	pmap_md_init();
482 
483 	/*
484 	 * Now it is safe to enable pv entry recording.
485 	 */
486 	pmap_initialized = true;
487 }
488 
489 /*
490  *	Create and return a physical map.
491  *
492  *	The returned pmap is a real, hardware-usable map with a
493  *	reference count of one, covering the user address range
494  *	[VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS).  (The historical "size"
495  *	argument that selected a software-only map is gone;
496  *	pmap_create() takes no arguments.)
497  */
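/*
 * Reference-counting sketch (normally driven by uvmspace_alloc() and
 * uvmspace_free(); shown here only to illustrate the contract):
 *
 *	struct pmap *pm = pmap_create();	pm_count == 1
 *	pmap_reference(pm);			pm_count == 2
 *	pmap_destroy(pm);			pm_count back to 1
 *	pmap_destroy(pm);			pm_count hits 0, pmap freed
 */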
501 pmap_t
502 pmap_create(void)
503 {
504 	pmap_t pmap;
505 
506 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
507 	PMAP_COUNT(create);
508 
509 	pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
510 	memset(pmap, 0, PMAP_SIZE);
511 
512 	KASSERT(pmap->pm_pai[0].pai_link.le_prev == NULL);
513 
514 	pmap->pm_count = 1;
515 	pmap->pm_minaddr = VM_MIN_ADDRESS;
516 	pmap->pm_maxaddr = VM_MAXUSER_ADDRESS;
517 
518 	pmap_segtab_init(pmap);
519 
520 #ifdef MULTIPROCESSOR
521 	kcpuset_create(&pmap->pm_active, true);
522 	kcpuset_create(&pmap->pm_onproc, true);
523 #endif
524 
525 	UVMHIST_LOG(pmaphist, "<- pmap %p", pmap,0,0,0);
526 	return pmap;
527 }
528 
529 /*
530  *	Retire the given physical map from service.
531  *	Should only be called if the map contains
532  *	no valid mappings.
533  */
534 void
535 pmap_destroy(pmap_t pmap)
536 {
537 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
538 	UVMHIST_LOG(pmaphist, "(pmap=%p)", pmap, 0,0,0);
539 
540 	if (atomic_dec_uint_nv(&pmap->pm_count) > 0) {
541 		PMAP_COUNT(dereference);
542 		return;
543 	}
544 
545 	KASSERT(pmap->pm_count == 0);
546 	PMAP_COUNT(destroy);
547 	kpreempt_disable();
548 	pmap_md_tlb_miss_lock_enter();
549 	pmap_tlb_asid_release_all(pmap);
550 	pmap_segtab_destroy(pmap, NULL, 0);
551 	pmap_md_tlb_miss_lock_exit();
552 
553 #ifdef MULTIPROCESSOR
554 	kcpuset_destroy(pmap->pm_active);
555 	kcpuset_destroy(pmap->pm_onproc);
556 #endif
557 
558 	pool_put(&pmap_pmap_pool, pmap);
559 	kpreempt_enable();
560 
561 	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
562 }
563 
564 /*
565  *	Add a reference to the specified pmap.
566  */
567 void
568 pmap_reference(pmap_t pmap)
569 {
570 
571 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
572 	UVMHIST_LOG(pmaphist, "(pmap=%p)", pmap, 0,0,0);
573 	PMAP_COUNT(reference);
574 
575 	if (pmap != NULL) {
576 		atomic_inc_uint(&pmap->pm_count);
577 	}
578 
579 	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
580 }
581 
582 /*
583  *	Make a new pmap (vmspace) active for the given process.
584  */
585 void
586 pmap_activate(struct lwp *l)
587 {
588 	pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
589 
590 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
591 	UVMHIST_LOG(pmaphist, "(l=%p (pmap=%p))", l, pmap, 0,0);
592 	PMAP_COUNT(activate);
593 
594 	kpreempt_disable();
595 	pmap_md_tlb_miss_lock_enter();
596 	pmap_tlb_asid_acquire(pmap, l);
597 	if (l == curlwp) {
598 		pmap_segtab_activate(pmap, l);
599 	}
600 	pmap_md_tlb_miss_lock_exit();
601 	kpreempt_enable();
602 
603 	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
604 }
605 
606 /*
607  *	Make a previously active pmap (vmspace) inactive.
608  */
609 void
610 pmap_deactivate(struct lwp *l)
611 {
612 	pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
613 
614 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
615 	UVMHIST_LOG(pmaphist, "(l=%p (pmap=%p))", l, pmap, 0,0);
616 	PMAP_COUNT(deactivate);
617 
618 	kpreempt_disable();
619 	pmap_md_tlb_miss_lock_enter();
620 	curcpu()->ci_pmap_user_segtab = PMAP_INVALID_SEGTAB_ADDRESS;
621 	pmap_tlb_asid_deactivate(pmap);
622 	pmap_md_tlb_miss_lock_exit();
623 	kpreempt_enable();
624 
625 	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
626 }
627 
628 void
629 pmap_update(struct pmap *pmap)
630 {
631 
632 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
633 	UVMHIST_LOG(pmaphist, "(pmap=%p)", pmap, 0,0,0);
634 	PMAP_COUNT(update);
635 
636 	kpreempt_disable();
637 #if defined(MULTIPROCESSOR) && defined(PMAP_NEED_TLB_SHOOTDOWN)
638 	u_int pending = atomic_swap_uint(&pmap->pm_shootdown_pending, 0);
639 	if (pending && pmap_tlb_shootdown_bystanders(pmap))
640 		PMAP_COUNT(shootdown_ipis);
641 #endif
642 	pmap_md_tlb_miss_lock_enter();
643 #if defined(DEBUG) && !defined(MULTIPROCESSOR)
644 	pmap_tlb_check(pmap, pmap_md_tlb_check_entry);
645 #endif /* DEBUG */
646 
647 	/*
648 	 * If pmap_remove_all was called, we deactivated ourselves and nuked
649 	 * our ASID.  Now we have to reactivate ourselves.
650 	 */
651 	if (__predict_false(pmap->pm_flags & PMAP_DEFERRED_ACTIVATE)) {
652 		pmap->pm_flags ^= PMAP_DEFERRED_ACTIVATE;
653 		pmap_tlb_asid_acquire(pmap, curlwp);
654 		pmap_segtab_activate(pmap, curlwp);
655 	}
656 	pmap_md_tlb_miss_lock_exit();
657 	kpreempt_enable();
658 
659 	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
660 }
661 
662 /*
663  *	Remove the given range of addresses from the specified map.
664  *
665  *	It is assumed that the start and end are properly
666  *	rounded to the page size.
667  */
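/*
 * The per-PTE work is done by a callback handed to pmap_pte_process(),
 * which walks the segment table for the range and passes each run of
 * PTEs to the callback, e.g.:
 *
 *	pmap_pte_process(pmap, sva, eva, pmap_pte_remove, npte);
 *
 * pmap_protect() and pmap_kremove() below use the same pattern with
 * their own callbacks.
 */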
668 
669 static bool
670 pmap_pte_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep,
671 	uintptr_t flags)
672 {
673 	const pt_entry_t npte = flags;
674 	const bool is_kernel_pmap_p = (pmap == pmap_kernel());
675 
676 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
677 	UVMHIST_LOG(pmaphist, "(pmap=%p %sva=%"PRIxVADDR"..%"PRIxVADDR,
678 	    pmap, (is_kernel_pmap_p ? "(kernel) " : ""), sva, eva);
679 	UVMHIST_LOG(pmaphist, "ptep=%p, flags(npte)=%#"PRIxPTR")",
680 	    ptep, flags, 0, 0);
681 
682 	KASSERT(kpreempt_disabled());
683 
684 	for (; sva < eva; sva += NBPG, ptep++) {
685 		pt_entry_t pt_entry = *ptep;
686 		if (!pte_valid_p(pt_entry))
687 			continue;
688 		if (is_kernel_pmap_p)
689 			PMAP_COUNT(remove_kernel_pages);
690 		else
691 			PMAP_COUNT(remove_user_pages);
692 		if (pte_wired_p(pt_entry))
693 			pmap->pm_stats.wired_count--;
694 		pmap->pm_stats.resident_count--;
695 		struct vm_page *pg = PHYS_TO_VM_PAGE(pte_to_paddr(pt_entry));
696 		if (__predict_true(pg != NULL)) {
697 			pmap_remove_pv(pmap, sva, pg,
698 			   pte_modified_p(pt_entry));
699 		}
700 		pmap_md_tlb_miss_lock_enter();
701 		*ptep = npte;
702 		/*
703 		 * Flush the TLB for the given address.
704 		 */
705 		pmap_tlb_invalidate_addr(pmap, sva);
706 		pmap_md_tlb_miss_lock_exit();
707 	}
708 	return false;
709 }
710 
711 void
712 pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
713 {
714 	const bool is_kernel_pmap_p = (pmap == pmap_kernel());
715 	const pt_entry_t npte = pte_nv_entry(is_kernel_pmap_p);
716 
717 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
718 	UVMHIST_LOG(pmaphist, "(pmap=%p, va=%#"PRIxVADDR"..%#"PRIxVADDR")",
719 	    pmap, sva, eva, 0);
720 
721 	if (is_kernel_pmap_p)
722 		PMAP_COUNT(remove_kernel_calls);
723 	else
724 		PMAP_COUNT(remove_user_calls);
725 #ifdef PARANOIADIAG
726 	if (sva < pmap->pm_minaddr || eva > pmap->pm_maxaddr)
727 		panic("%s: va range %#"PRIxVADDR"-%#"PRIxVADDR" not in range",
728 		    __func__, sva, eva - 1);
729 	if (PMAP_IS_ACTIVE(pmap)) {
730 		struct pmap_asid_info * const pai = PMAP_PAI(pmap, curcpu());
731 		uint32_t asid = tlb_get_asid();
732 		if (asid != pai->pai_asid) {
733 			panic("%s: inconsistency for active TLB flush"
734 			    ": %d <-> %d", __func__, asid, pai->pai_asid);
735 		}
736 	}
737 #endif
738 	kpreempt_disable();
739 	pmap_pte_process(pmap, sva, eva, pmap_pte_remove, npte);
740 	kpreempt_enable();
741 
742 	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
743 }
744 
745 /*
746  *	pmap_page_protect:
747  *
748  *	Lower the permission for all mappings to a given page.
749  */
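/*
 * For example, UVM uses pmap_page_protect(pg, VM_PROT_READ) to
 * write-protect a page when setting up copy-on-write, and
 * pmap_page_protect(pg, VM_PROT_NONE) to strip all mappings before a
 * page is freed or paged out.
 */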
750 void
751 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
752 {
753 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
754 	pv_entry_t pv;
755 	vaddr_t va;
756 
757 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
758 	UVMHIST_LOG(pmaphist, "(pg=%p (pa %#"PRIxPADDR") prot=%#x)",
759 	    pg, VM_PAGE_TO_PHYS(pg), prot, 0);
760 	PMAP_COUNT(page_protect);
761 
762 	switch (prot) {
763 	case VM_PROT_READ|VM_PROT_WRITE:
764 	case VM_PROT_ALL:
765 		break;
766 
767 	/* copy_on_write */
768 	case VM_PROT_READ:
769 	case VM_PROT_READ|VM_PROT_EXECUTE:
770 		(void)VM_PAGEMD_PVLIST_LOCK(mdpg, false);
771 		pv = &mdpg->mdpg_first;
772 		/*
773 		 * Loop over all current mappings setting/clearing as appropriate.
774 		 */
775 		if (pv->pv_pmap != NULL) {
776 			while (pv != NULL) {
777 				const pmap_t pmap = pv->pv_pmap;
778 				const uint16_t gen = VM_PAGEMD_PVLIST_GEN(mdpg);
779 				va = pv->pv_va;
780 				VM_PAGEMD_PVLIST_UNLOCK(mdpg);
781 				pmap_protect(pmap, va, va + PAGE_SIZE, prot);
782 				KASSERT(pv->pv_pmap == pmap);
783 				pmap_update(pmap);
784 				if (gen != VM_PAGEMD_PVLIST_LOCK(mdpg, false)) {
785 					pv = &mdpg->mdpg_first;
786 				} else {
787 					pv = pv->pv_next;
788 				}
789 			}
790 		}
791 		VM_PAGEMD_PVLIST_UNLOCK(mdpg);
792 		break;
793 
794 	/* remove_all */
795 	default:
796 		/*
797 		 * Do this first so that for each unmapping, pmap_remove_pv
798 		 * won't try to sync the icache.
799 		 */
800 		if (pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE)) {
801 			UVMHIST_LOG(pmapexechist, "pg %p (pa %#"PRIxPADDR
802 			    "): execpage cleared", pg, VM_PAGE_TO_PHYS(pg),0,0);
803 			PMAP_COUNT(exec_uncached_page_protect);
804 		}
805 		(void)VM_PAGEMD_PVLIST_LOCK(mdpg, false);
806 		pv = &mdpg->mdpg_first;
807 		while (pv->pv_pmap != NULL) {
808 			const pmap_t pmap = pv->pv_pmap;
809 			va = pv->pv_va;
810 			VM_PAGEMD_PVLIST_UNLOCK(mdpg);
811 			pmap_remove(pmap, va, va + PAGE_SIZE);
812 			pmap_update(pmap);
813 			(void)VM_PAGEMD_PVLIST_LOCK(mdpg, false);
814 		}
815 		VM_PAGEMD_PVLIST_UNLOCK(mdpg);
816 	}
817 
818 	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
819 }
820 
821 static bool
822 pmap_pte_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep,
823 	uintptr_t flags)
824 {
825 	const vm_prot_t prot = (flags & VM_PROT_ALL);
826 
827 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
828 	UVMHIST_LOG(pmaphist, "(pmap=%p %sva=%"PRIxVADDR"..%"PRIxVADDR,
829 	    pmap, (pmap == pmap_kernel() ? "(kernel) " : ""), sva, eva);
830 	UVMHIST_LOG(pmaphist, "ptep=%p, flags(npte)=%#"PRIxPTR")",
831 	    ptep, flags, 0, 0);
832 
833 	KASSERT(kpreempt_disabled());
834 	/*
835 	 * Change protection on every valid mapping within this segment.
836 	 */
837 	for (; sva < eva; sva += NBPG, ptep++) {
838 		pt_entry_t pt_entry = *ptep;
839 		if (!pte_valid_p(pt_entry))
840 			continue;
841 		struct vm_page * const pg =
842 		    PHYS_TO_VM_PAGE(pte_to_paddr(pt_entry));
843 		if (pg != NULL && pte_modified_p(pt_entry)) {
844 			struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
845 			pmap_md_vca_clean(pg, sva, PMAP_WBINV);
846 			if (VM_PAGEMD_EXECPAGE_P(mdpg)) {
847 				KASSERT(mdpg->mdpg_first.pv_pmap != NULL);
848 				if (pte_cached_p(pt_entry)) {
849 					UVMHIST_LOG(pmapexechist,
850 					    "pg %p (pa %#"PRIxPADDR"): %s",
851 					    pg, VM_PAGE_TO_PHYS(pg),
852 					    "syncicached performed", 0);
853 					pmap_page_syncicache(pg);
854 					PMAP_COUNT(exec_synced_protect);
855 				}
856 			}
857 		}
858 		pt_entry = pte_prot_downgrade(pt_entry, prot);
859 		if (*ptep != pt_entry) {
860 			pmap_md_tlb_miss_lock_enter();
861 			*ptep = pt_entry;
862 			/*
863 			 * Update the TLB if needed.
864 			 */
865 			pmap_tlb_update_addr(pmap, sva, pt_entry,
866 			    PMAP_TLB_NEED_IPI);
867 			pmap_md_tlb_miss_lock_exit();
868 		}
869 	}
870 	return false;
871 }
872 
873 /*
874  *	Set the physical protection on the
875  *	specified range of this map as requested.
876  */
877 void
878 pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
879 {
880 
881 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
882 	UVMHIST_LOG(pmaphist,
883 	    "(pmap=%p, va=%#"PRIxVADDR"..%#"PRIxVADDR" prot=%#x)",
884 	    pmap, sva, eva, prot);
885 	PMAP_COUNT(protect);
886 
887 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
888 		pmap_remove(pmap, sva, eva);
889 		UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
890 		return;
891 	}
892 
893 #ifdef PARANOIADIAG
894 	if (sva < pmap->pm_minaddr || eva > pmap->pm_maxaddr)
895 		panic("%s: va range %#"PRIxVADDR"-%#"PRIxVADDR" not in range",
896 		    __func__, sva, eva - 1);
897 	if (PMAP_IS_ACTIVE(pmap)) {
898 		struct pmap_asid_info * const pai = PMAP_PAI(pmap, curcpu());
899 		uint32_t asid = tlb_get_asid();
900 		if (asid != pai->pai_asid) {
901 			panic("%s: inconsistency for active TLB update"
902 			    ": %d <-> %d", __func__, asid, pai->pai_asid);
903 		}
904 	}
905 #endif
906 
907 	/*
908 	 * Change protection on every valid mapping in the given range.
909 	 */
910 	kpreempt_disable();
911 	pmap_pte_process(pmap, sva, eva, pmap_pte_protect, prot);
912 	kpreempt_enable();
913 
914 	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
915 }
916 
917 #if defined(__PMAP_VIRTUAL_CACHE_ALIASES)
918 /*
919  *	pmap_page_cache:
920  *
921  *	Change all mappings of a managed page to cached/uncached.
922  */
923 static void
924 pmap_page_cache(struct vm_page *pg, bool cached)
925 {
926 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
927 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
928 	UVMHIST_LOG(pmaphist, "(pg=%p (pa %#"PRIxPADDR") cached=%s)",
929 	    pg, VM_PAGE_TO_PHYS(pg), cached ? "true" : "false", 0);
930 	KASSERT(kpreempt_disabled());
931 
932 	if (cached) {
933 		pmap_page_clear_attributes(mdpg, VM_PAGEMD_UNCACHED);
934 		PMAP_COUNT(page_cache_restorations);
935 	} else {
936 		pmap_page_set_attributes(mdpg, VM_PAGEMD_UNCACHED);
937 		PMAP_COUNT(page_cache_evictions);
938 	}
939 
940 	KASSERT(VM_PAGEMD_PVLIST_LOCKED_P(mdpg));
941 	KASSERT(kpreempt_disabled());
942 	for (pv_entry_t pv = &mdpg->mdpg_first;
943 	     pv != NULL;
944 	     pv = pv->pv_next) {
945 		pmap_t pmap = pv->pv_pmap;
946 		vaddr_t va = pv->pv_va;
947 
948 		KASSERT(pmap != NULL);
949 		KASSERT(pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(va));
950 		pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
951 		if (ptep == NULL)
952 			continue;
953 		pt_entry_t pt_entry = *ptep;
954 		if (pte_valid_p(pt_entry)) {
955 			pt_entry = pte_cached_change(pt_entry, cached);
956 			pmap_md_tlb_miss_lock_enter();
957 			*ptep = pt_entry;
958 			pmap_tlb_update_addr(pmap, va, pt_entry,
959 			    PMAP_TLB_NEED_IPI);
960 			pmap_md_tlb_miss_lock_exit();
961 		}
962 	}
963 	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
964 }
965 #endif	/* __PMAP_VIRTUAL_CACHE_ALIASES */
966 
967 /*
968  *	Insert the given physical page (pa) at
969  *	the specified virtual address (va) in the
970  *	target physical map with the protection requested.
971  *
972  *	If specified, the page will be wired down, meaning
973  *	that the related pte cannot be reclaimed.
974  *
975  *	NB:  This is the only routine which MAY NOT lazy-evaluate
976  *	or lose information.  That is, this routine must actually
977  *	insert this page into the given map NOW.
978  */
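/*
 * Typical use (sketch): enter the mapping, then commit any deferred
 * TLB work with pmap_update():
 *
 *	error = pmap_enter(pmap, va, pa, VM_PROT_READ, VM_PROT_READ);
 *	if (error != 0)
 *		... handle resource shortage (ENOMEM) ...
 *	pmap_update(pmap);
 */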
979 int
980 pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
981 {
982 	pt_entry_t npte;
983 	const bool wired = (flags & PMAP_WIRED) != 0;
984 	const bool is_kernel_pmap_p = (pmap == pmap_kernel());
985 #ifdef UVMHIST
986 	struct kern_history * const histp =
987 	    ((prot & VM_PROT_EXECUTE) ? &pmapexechist : &pmaphist);
988 #endif
989 
990 	UVMHIST_FUNC(__func__);
991 #define VM_PROT_STRING(prot) \
992 	&"\0    (R)\0  (W)\0  (RW)\0 (X)\0  (RX)\0 (WX)\0 (RWX)\0"[UVM_PROTECTION(prot)*6]
993 	UVMHIST_CALLED(*histp);
994 	UVMHIST_LOG(*histp, "(pmap=%p, va=%#"PRIxVADDR", pa=%#"PRIxPADDR,
995 	    pmap, va, pa, 0);
996 	UVMHIST_LOG(*histp, "prot=%#x%s flags=%#x%s)",
997 	    prot, VM_PROT_STRING(prot), flags, VM_PROT_STRING(flags));
998 
999 	const bool good_color = PMAP_PAGE_COLOROK_P(pa, va);
1000 	if (is_kernel_pmap_p) {
1001 		PMAP_COUNT(kernel_mappings);
1002 		if (!good_color)
1003 			PMAP_COUNT(kernel_mappings_bad);
1004 	} else {
1005 		PMAP_COUNT(user_mappings);
1006 		if (!good_color)
1007 			PMAP_COUNT(user_mappings_bad);
1008 	}
1009 #if defined(DEBUG) || defined(DIAGNOSTIC) || defined(PARANOIADIAG)
1010 	if (va < pmap->pm_minaddr || va >= pmap->pm_maxaddr)
1011 		panic("%s: %s %#"PRIxVADDR" too big",
1012 		    __func__, is_kernel_pmap_p ? "kva" : "uva", va);
1013 #endif
1014 
1015 	KASSERTMSG(prot & VM_PROT_READ,
1016 	    "%s: no READ (%#x) in prot %#x", __func__, VM_PROT_READ, prot);
1017 
1018 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
1019 	struct vm_page_md *mdpg;
1020 
1021 	if (pg) {
1022 		mdpg = VM_PAGE_TO_MD(pg);
1023 		/* Set page referenced/modified status based on flags */
1024 		if (flags & VM_PROT_WRITE)
1025 			pmap_page_set_attributes(mdpg, VM_PAGEMD_MODIFIED|VM_PAGEMD_REFERENCED);
1026 		else if (flags & VM_PROT_ALL)
1027 			pmap_page_set_attributes(mdpg, VM_PAGEMD_REFERENCED);
1028 
1029 #ifdef __PMAP_VIRTUAL_CACHE_ALIASES
1030 		if (!VM_PAGEMD_CACHED(pg))
1031 			flags |= PMAP_NOCACHE;
1032 #endif
1033 
1034 		PMAP_COUNT(managed_mappings);
1035 	} else {
1036 		/*
1037 		 * Assumption: if it is not part of our managed memory
1038 		 * then it must be device memory which may be volatile.
1039 		 */
1040 		mdpg = NULL;
1041 		flags |= PMAP_NOCACHE;
1042 		PMAP_COUNT(unmanaged_mappings);
1043 	}
1044 
1045 	npte = pte_make_enter(pa, mdpg, prot, flags, is_kernel_pmap_p);
1046 
1047 	kpreempt_disable();
1048 	pt_entry_t * const ptep = pmap_pte_reserve(pmap, va, flags);
1049 	if (__predict_false(ptep == NULL)) {
1050 		kpreempt_enable();
1051 		UVMHIST_LOG(*histp, "<- ENOMEM", 0,0,0,0);
1052 		return ENOMEM;
1053 	}
1054 	pt_entry_t opte = *ptep;
1055 
1056 	/* Done after case that may sleep/return. */
1057 	if (pg)
1058 		pmap_enter_pv(pmap, va, pg, &npte);
1059 
1060 	/*
1061 	 * Now validate mapping with desired protection/wiring.
1062 	 * Assume uniform modified and referenced status for all
1063 	 * hardware pages that make up a single VM page.
1064 	 */
1065 	if (wired) {
1066 		pmap->pm_stats.wired_count++;
1067 		npte = pte_wire_entry(npte);
1068 	}
1069 
1070 	UVMHIST_LOG(*histp, "new pte %#x (pa %#"PRIxPADDR")", npte, pa, 0,0);
1071 
1072 	if (pte_valid_p(opte) && pte_to_paddr(opte) != pa) {
1073 		pmap_remove(pmap, va, va + NBPG);
1074 		PMAP_COUNT(user_mappings_changed);
1075 	}
1076 
1077 	KASSERT(pte_valid_p(npte));
1078 	bool resident = pte_valid_p(opte);
1079 	if (!resident)
1080 		pmap->pm_stats.resident_count++;
1081 	pmap_md_tlb_miss_lock_enter();
1082 	*ptep = npte;
1083 
1084 	pmap_tlb_update_addr(pmap, va, npte,
1085 	    ((flags & VM_PROT_ALL) ? PMAP_TLB_INSERT : 0)
1086 	    | (resident ? PMAP_TLB_NEED_IPI : 0));
1087 	pmap_md_tlb_miss_lock_exit();
1088 	kpreempt_enable();
1089 
1090 	if (pg != NULL && (prot == (VM_PROT_READ | VM_PROT_EXECUTE))) {
1091 		KASSERT(mdpg != NULL);
1092 		PMAP_COUNT(exec_mappings);
1093 		if (!VM_PAGEMD_EXECPAGE_P(mdpg) && pte_cached_p(npte)) {
1094 			if (!pte_deferred_exec_p(npte)) {
1095 				UVMHIST_LOG(*histp,
1096 				    "va=%#"PRIxVADDR" pg %p: %s syncicache%s",
1097 				    va, pg, "immediate", "");
1098 				pmap_page_syncicache(pg);
1099 				pmap_page_set_attributes(mdpg,
1100 				    VM_PAGEMD_EXECPAGE);
1101 				PMAP_COUNT(exec_synced_mappings);
1102 			} else {
1103 				UVMHIST_LOG(*histp, "va=%#"PRIxVADDR
1104 				    " pg %p: %s syncicache: pte %#x",
1105 				    va, pg, "defer", npte);
1106 			}
1107 		} else {
1108 			UVMHIST_LOG(*histp,
1109 			    "va=%#"PRIxVADDR" pg %p: %s syncicache%s",
1110 			    va, pg, "no",
1111 			    (pte_cached_p(npte)
1112 				? " (already exec)"
1113 				: " (uncached)"));
1114 		}
1115 	} else if (pg != NULL && (prot & VM_PROT_EXECUTE)) {
1116 		KASSERT(mdpg != NULL);
1117 		KASSERT(prot & VM_PROT_WRITE);
1118 		PMAP_COUNT(exec_mappings);
1119 		pmap_page_syncicache(pg);
1120 		pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
1121 		UVMHIST_LOG(pmapexechist,
1122 		    "va=%#"PRIxVADDR" pg %p: %s syncicache%s",
1123 		    va, pg, "immediate", " (writeable)");
1124 	}
1125 
1126 	if (prot & VM_PROT_EXECUTE) {
1127 		UVMHIST_LOG(pmapexechist, "<- 0 (OK)", 0,0,0,0);
1128 	} else {
1129 		UVMHIST_LOG(pmaphist, "<- 0 (OK)", 0,0,0,0);
1130 	}
1131 	return 0;
1132 }
1133 
1134 void
1135 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1136 {
1137 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
1138 	struct vm_page_md *mdpg;
1139 
1140 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
1141 	UVMHIST_LOG(pmaphist, "(va=%#"PRIxVADDR" pa=%#"PRIxPADDR
1142 	    ", prot=%#x, flags=%#x)", va, pa, prot, flags);
1143 	PMAP_COUNT(kenter_pa);
1144 
1145 	if (pg == NULL) {
1146 		mdpg = NULL;
1147 		PMAP_COUNT(kenter_pa_unmanaged);
1148 		flags |= PMAP_NOCACHE;
1149 	} else {
1150 		mdpg = VM_PAGE_TO_MD(pg);
1151 	}
1152 
1153 	if ((flags & PMAP_NOCACHE) == 0 && !PMAP_PAGE_COLOROK_P(pa, va))
1154 		PMAP_COUNT(kenter_pa_bad);
1155 
1156 	const pt_entry_t npte = pte_make_kenter_pa(pa, mdpg, prot, flags);
1157 	kpreempt_disable();
1158 	pt_entry_t * const ptep = pmap_pte_reserve(pmap_kernel(), va, 0);
1159 	KASSERT(ptep != NULL);
1160 	KASSERT(!pte_valid_p(*ptep));
1161 	pmap_md_tlb_miss_lock_enter();
1162 	*ptep = npte;
1163 	/*
1164 	 * We have the option to force this mapping into the TLB but we
1165 	 * don't.  Instead let the next reference to the page do it.
1166 	 */
1167 	pmap_tlb_update_addr(pmap_kernel(), va, npte, 0);
1168 	pmap_md_tlb_miss_lock_exit();
1169 	kpreempt_enable();
1170 #if DEBUG > 1
1171 	for (u_int i = 0; i < PAGE_SIZE / sizeof(long); i++) {
1172 		if (((long *)va)[i] != ((long *)pa)[i])
1173 			panic("%s: contents (%lx) of va %#"PRIxVADDR
1174 			    " != contents (%lx) of pa %#"PRIxPADDR, __func__,
1175 			    ((long *)va)[i], va, ((long *)pa)[i], pa);
1176 	}
1177 #endif
1178 	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
1179 }
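
/*
 * The usual pairing (sketch): a kernel-only mapping is entered, used,
 * and torn down again:
 *
 *	pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
 *	... use the mapping ...
 *	pmap_kremove(va, PAGE_SIZE);
 *	pmap_update(pmap_kernel());
 */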
1180 
1181 static bool
1182 pmap_pte_kremove(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep,
1183 	uintptr_t flags)
1184 {
1185 	const pt_entry_t new_pt_entry = pte_nv_entry(true);
1186 
1187 	KASSERT(kpreempt_disabled());
1188 
1189 	/*
1190 	 * Invalidate every valid mapping within this segment.
1191 	 */
1192 	for (; sva < eva; sva += NBPG, ptep++) {
1193 		pt_entry_t pt_entry = *ptep;
1194 		if (!pte_valid_p(pt_entry)) {
1195 			continue;
1196 		}
1197 
1198 		PMAP_COUNT(kremove_pages);
1199 		struct vm_page * const pg =
1200 		    PHYS_TO_VM_PAGE(pte_to_paddr(pt_entry));
1201 		if (pg != NULL)
1202 			pmap_md_vca_clean(pg, sva, PMAP_WBINV);
1203 
1204 		pmap_md_tlb_miss_lock_enter();
1205 		*ptep = new_pt_entry;
1206 		pmap_tlb_invalidate_addr(pmap_kernel(), sva);
1207 		pmap_md_tlb_miss_lock_exit();
1208 	}
1209 
1210 	return false;
1211 }
1212 
1213 void
1214 pmap_kremove(vaddr_t va, vsize_t len)
1215 {
1216 	const vaddr_t sva = trunc_page(va);
1217 	const vaddr_t eva = round_page(va + len);
1218 
1219 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
1220 	UVMHIST_LOG(pmaphist, "(va=%#"PRIxVADDR" len=%#"PRIxVSIZE")",
1221 	    va, len, 0,0);
1222 
1223 	kpreempt_disable();
1224 	pmap_pte_process(pmap_kernel(), sva, eva, pmap_pte_kremove, 0);
1225 	kpreempt_enable();
1226 
1227 	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
1228 }
1229 
1230 void
1231 pmap_remove_all(struct pmap *pmap)
1232 {
1233 	KASSERT(pmap != pmap_kernel());
1234 
1235 	kpreempt_disable();
1236 	/*
1237 	 * Free all of our ASIDs which means we can skip doing all the
1238 	 * tlb_invalidate_addrs().
1239 	 */
1240 	pmap_md_tlb_miss_lock_enter();
1241 	pmap_tlb_asid_deactivate(pmap);
1242 	pmap_tlb_asid_release_all(pmap);
1243 	pmap_md_tlb_miss_lock_exit();
1244 	pmap->pm_flags |= PMAP_DEFERRED_ACTIVATE;
1245 
1246 	kpreempt_enable();
1247 }
1248 
1249 /*
1250  *	Routine:	pmap_unwire
1251  *	Function:	Clear the wired attribute for a map/virtual-address
1252  *			pair.
1253  *	In/out conditions:
1254  *			The mapping must already exist in the pmap.
1255  */
1256 void
1257 pmap_unwire(pmap_t pmap, vaddr_t va)
1258 {
1259 
1260 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
1261 	UVMHIST_LOG(pmaphist, "(pmap=%p va=%#"PRIxVADDR")", pmap, va, 0,0);
1262 	PMAP_COUNT(unwire);
1263 
1264 	/*
1265 	 * Don't need to flush the TLB since PG_WIRED is only in software.
1266 	 */
1267 #ifdef PARANOIADIAG
1268 	if (va < pmap->pm_minaddr || pmap->pm_maxaddr <= va)
1269 		panic("pmap_unwire");
1270 #endif
1271 	kpreempt_disable();
1272 	pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
1273 #ifdef DIAGNOSTIC
1274 	if (ptep == NULL)
1275 		panic("%s: pmap %p va %#"PRIxVADDR" invalid STE",
1276 		    __func__, pmap, va);
1277 #endif
1278 	pt_entry_t pt_entry = *ptep;
1279 
1280 #ifdef DIAGNOSTIC
1281 	if (!pte_valid_p(pt_entry))
1282 		panic("%s: pmap %p va %#"PRIxVADDR" invalid PTE",
1283 		    __func__, pmap, va);
1284 #endif
1285 
1286 	if (pte_wired_p(pt_entry)) {
1287 		pmap_md_tlb_miss_lock_enter();
1288 		*ptep = pte_unwire_entry(*ptep);
1289 		pmap_md_tlb_miss_lock_exit();
1290 		pmap->pm_stats.wired_count--;
1291 	}
1292 #ifdef DIAGNOSTIC
1293 	else {
1294 		printf("%s: wiring for pmap %p va %#"PRIxVADDR" unchanged!\n",
1295 		    __func__, pmap, va);
1296 	}
1297 #endif
1298 	kpreempt_enable();
1299 }
1300 
1301 /*
1302  *	Routine:	pmap_extract
1303  *	Function:
1304  *		Extract the physical page address associated
1305  *		with the given map/virtual_address pair.
1306  */
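/*
 * Example:
 *
 *	paddr_t pa;
 *	if (pmap_extract(pmap_kernel(), va, &pa))
 *		... va is currently mapped at physical address pa ...
 *	else
 *		... there is no valid mapping for va ...
 */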
1307 bool
1308 pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
1309 {
1310 	paddr_t pa;
1311 
1312 	//UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
1313 	//UVMHIST_LOG(pmaphist, "(pmap=%p va=%#"PRIxVADDR")", pmap, va, 0,0);
1314 	if (pmap == pmap_kernel()) {
1315 		if (pmap_md_direct_mapped_vaddr_p(va)) {
1316 			pa = pmap_md_direct_mapped_vaddr_to_paddr(va);
1317 			goto done;
1318 		}
1319 		if (pmap_md_io_vaddr_p(va))
1320 			panic("pmap_extract: io address %#"PRIxVADDR"", va);
1321 	}
1322 	kpreempt_disable();
1323 	pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
1324 	if (ptep == NULL) {
1325 		//UVMHIST_LOG(pmaphist, "<- false (not in segmap)", 0,0,0,0);
1326 		kpreempt_enable();
1327 		return false;
1328 	}
1329 	if (!pte_valid_p(*ptep)) {
1330 		//UVMHIST_LOG(pmaphist, "<- false (PTE not valid)", 0,0,0,0);
1331 		kpreempt_enable();
1332 		return false;
1333 	}
1334 	pa = pte_to_paddr(*ptep) | (va & PGOFSET);
1335 	kpreempt_enable();
1336 done:
1337 	if (pap != NULL) {
1338 		*pap = pa;
1339 	}
1340 	//UVMHIST_LOG(pmaphist, "<- true (pa %#"PRIxPADDR")", pa, 0,0,0);
1341 	return true;
1342 }
1343 
1344 /*
1345  *	Copy the range specified by src_addr/len
1346  *	from the source map to the range dst_addr/len
1347  *	in the destination map.
1348  *
1349  *	This routine is only advisory and need not do anything.
1350  */
1351 void
1352 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr, vsize_t len,
1353     vaddr_t src_addr)
1354 {
1355 
1356 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
1357 	PMAP_COUNT(copy);
1358 }
1359 
1360 /*
1361  *	pmap_clear_reference:
1362  *
1363  *	Clear the reference bit on the specified physical page.
1364  */
1365 bool
1366 pmap_clear_reference(struct vm_page *pg)
1367 {
1368 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
1369 
1370 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
1371 	UVMHIST_LOG(pmaphist, "(pg=%p (pa %#"PRIxPADDR"))",
1372 	   pg, VM_PAGE_TO_PHYS(pg), 0,0);
1373 
1374 	bool rv = pmap_page_clear_attributes(mdpg, VM_PAGEMD_REFERENCED);
1375 
1376 	UVMHIST_LOG(pmaphist, "<- %s", rv ? "true" : "false", 0,0,0);
1377 
1378 	return rv;
1379 }
1380 
1381 /*
1382  *	pmap_is_referenced:
1383  *
1384  *	Return whether or not the specified physical page is referenced
1385  *	by any physical maps.
1386  */
1387 bool
1388 pmap_is_referenced(struct vm_page *pg)
1389 {
1390 
1391 	return VM_PAGEMD_REFERENCED_P(VM_PAGE_TO_MD(pg));
1392 }
1393 
1394 /*
1395  *	Clear the modify bits on the specified physical page.
1396  */
1397 bool
1398 pmap_clear_modify(struct vm_page *pg)
1399 {
1400 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
1401 	pv_entry_t pv = &mdpg->mdpg_first;
1402 	pv_entry_t pv_next;
1403 	uint16_t gen;
1404 
1405 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
1406 	UVMHIST_LOG(pmaphist, "(pg=%p (%#"PRIxPADDR"))",
1407 	    pg, VM_PAGE_TO_PHYS(pg), 0,0);
1408 	PMAP_COUNT(clear_modify);
1409 
1410 	if (VM_PAGEMD_EXECPAGE_P(mdpg)) {
1411 		if (pv->pv_pmap == NULL) {
1412 			UVMHIST_LOG(pmapexechist,
1413 			    "pg %p (pa %#"PRIxPADDR"): %s",
1414 			    pg, VM_PAGE_TO_PHYS(pg), "execpage cleared", 0);
1415 			pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
1416 			PMAP_COUNT(exec_uncached_clear_modify);
1417 		} else {
1418 			UVMHIST_LOG(pmapexechist,
1419 			    "pg %p (pa %#"PRIxPADDR"): %s",
1420 			    pg, VM_PAGE_TO_PHYS(pg), "syncicache performed", 0);
1421 			pmap_page_syncicache(pg);
1422 			PMAP_COUNT(exec_synced_clear_modify);
1423 		}
1424 	}
1425 	if (!pmap_page_clear_attributes(mdpg, VM_PAGEMD_MODIFIED)) {
1426 		UVMHIST_LOG(pmaphist, "<- false", 0,0,0,0);
1427 		return false;
1428 	}
1429 	if (pv->pv_pmap == NULL) {
1430 		UVMHIST_LOG(pmaphist, "<- true (no mappings)", 0,0,0,0);
1431 		return true;
1432 	}
1433 
1434 	/*
1435 	 * remove write access from any pages that are dirty
1436 	 * so we can tell if they are written to again later.
1437 	 * flush the VAC first if there is one.
1438 	 */
1439 	kpreempt_disable();
1440 	gen = VM_PAGEMD_PVLIST_LOCK(mdpg, false);
1441 	for (; pv != NULL; pv = pv_next) {
1442 		pmap_t pmap = pv->pv_pmap;
1443 		vaddr_t va = pv->pv_va;
1444 		pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
1445 		KASSERT(ptep);
1446 		pv_next = pv->pv_next;
1447 		pt_entry_t pt_entry = pte_prot_nowrite(*ptep);
1448 		if (*ptep == pt_entry) {
1449 			continue;
1450 		}
1451 		pmap_md_vca_clean(pg, va, PMAP_WBINV);
1452 		pmap_md_tlb_miss_lock_enter();
1453 		*ptep = pt_entry;
1454 		VM_PAGEMD_PVLIST_UNLOCK(mdpg);
1455 		pmap_tlb_invalidate_addr(pmap, va);
1456 		pmap_md_tlb_miss_lock_exit();
1457 		pmap_update(pmap);
1458 		if (__predict_false(gen != VM_PAGEMD_PVLIST_LOCK(mdpg, false))) {
1459 			/*
1460 			 * The list changed!  So restart from the beginning.
1461 			 */
1462 			pv_next = &mdpg->mdpg_first;
1463 		}
1464 	}
1465 	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
1466 	kpreempt_enable();
1467 
1468 	UVMHIST_LOG(pmaphist, "<- true (mappings changed)", 0,0,0,0);
1469 	return true;
1470 }
1471 
1472 /*
1473  *	pmap_is_modified:
1474  *
1475  *	Return whether or not the specified physical page is modified
1476  *	by any physical maps.
1477  */
1478 bool
1479 pmap_is_modified(struct vm_page *pg)
1480 {
1481 
1482 	return VM_PAGEMD_MODIFIED_P(VM_PAGE_TO_MD(pg));
1483 }
1484 
1485 /*
1486  *	pmap_set_modified:
1487  *
1488  *	Set the modified and referenced attributes for the specified page.
1489  */
1490 void
1491 pmap_set_modified(paddr_t pa)
1492 {
1493 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
1494 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
1495 	pmap_page_set_attributes(mdpg, VM_PAGEMD_MODIFIED|VM_PAGEMD_REFERENCED);
1496 }
1497 
1498 /******************** pv_entry management ********************/
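
/*
 * Each managed page's vm_page_md embeds the first pv_entry
 * (mdpg_first); any additional mappings of the same page are chained
 * through pv_next.  An empty list is encoded as
 * mdpg_first.pv_pmap == NULL.  A walk therefore looks like:
 *
 *	for (pv = &mdpg->mdpg_first; pv != NULL; pv = pv->pv_next)
 *		... pv->pv_pmap and pv->pv_va identify one mapping ...
 */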
1499 
1500 static void
1501 pmap_check_pvlist(struct vm_page *pg)
1502 {
1503 #ifdef PARANOIADIAG
1504 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
1505 	pv_entry_t pv = &mdpg->mdpg_first;
1506 	if (pv->pv_pmap != NULL) {
1507 		for (; pv != NULL; pv = pv->pv_next) {
1508 			KASSERT(!pmap_md_direct_mapped_vaddr_p(pv->pv_va));
1509 		}
1510 	}
1511 #endif /* PARANOIADIAG */
1512 }
1513 
1514 /*
1515  * Enter the pmap and virtual address into the
1516  * physical to virtual map table.
1517  */
1518 void
1519 pmap_enter_pv(pmap_t pmap, vaddr_t va, struct vm_page *pg, u_int *npte)
1520 {
1521 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
1522 	pv_entry_t pv, npv, apv;
1523 	uint16_t gen;
1524 	bool first __unused = false;
1525 
1526 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
1527 	UVMHIST_LOG(pmaphist,
1528 	    "(pmap=%p va=%#"PRIxVADDR" pg=%p (%#"PRIxPADDR")",
1529 	    pmap, va, pg, VM_PAGE_TO_PHYS(pg));
1530 	UVMHIST_LOG(pmaphist, "nptep=%p (%#x))", npte, *npte, 0, 0);
1531 
1532 	KASSERT(kpreempt_disabled());
1533 	KASSERT(pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(va));
1534 
1535 	apv = NULL;
1536 	pv = &mdpg->mdpg_first;
1537 	gen = VM_PAGEMD_PVLIST_LOCK(mdpg, true);
1538 	pmap_check_pvlist(pg);
1539 again:
1540 	if (pv->pv_pmap == NULL) {
1541 		KASSERT(pv->pv_next == NULL);
1542 		/*
1543 		 * No entries yet, use header as the first entry
1544 		 */
1545 		PMAP_COUNT(primary_mappings);
1546 		PMAP_COUNT(mappings);
1547 		first = true;
1548 #ifdef __PMAP_VIRTUAL_CACHE_ALIASES
1549 		pmap_page_clear_attributes(mdpg, VM_PAGEMD_UNCACHED);
1550 #endif
1551 		pv->pv_pmap = pmap;
1552 		pv->pv_va = va;
1553 	} else {
1554 		if (pmap_md_vca_add(pg, va, npte))
1555 			goto again;
1556 
1557 		/*
1558 		 * There is at least one other VA mapping this page.
1559 		 * Place this entry after the header.
1560 		 *
1561 		 * Note: the entry may already be in the table if
1562 		 * we are only changing the protection bits.
1563 		 */
1564 
1565 #ifdef PARANOIADIAG
1566 		const paddr_t pa = VM_PAGE_TO_PHYS(pg);
1567 #endif
1568 		for (npv = pv; npv; npv = npv->pv_next) {
1569 			if (pmap == npv->pv_pmap && va == npv->pv_va) {
1570 #ifdef PARANOIADIAG
1571 				pt_entry_t *ptep = pmap_pte_lookup(pmap, va);
1572 				pt_entry_t pt_entry = (ptep ? *ptep : 0);
1573 				if (!pte_valid_p(pt_entry)
1574 				    || pte_to_paddr(pt_entry) != pa)
1575 					printf(
1576 		"pmap_enter_pv: found va %#"PRIxVADDR" pa %#"PRIxPADDR" in pv_table but != %x\n",
1577 					    va, pa, pt_entry);
1578 #endif
1579 				PMAP_COUNT(remappings);
1580 				VM_PAGEMD_PVLIST_UNLOCK(mdpg);
1581 				if (__predict_false(apv != NULL))
1582 					pmap_pv_free(apv);
1583 				return;
1584 			}
1585 		}
1586 		if (__predict_true(apv == NULL)) {
1587 			/*
1588 			 * To allocate a PV, we have to release the PVLIST lock
1589 			 * so get the page generation.  We allocate the PV, and
1590 			 * then reacquire the lock.
1591 			 */
1592 			VM_PAGEMD_PVLIST_UNLOCK(mdpg);
1593 
1594 			apv = (pv_entry_t)pmap_pv_alloc();
1595 			if (apv == NULL)
1596 				panic("pmap_enter_pv: pmap_pv_alloc() failed");
1597 
1598 			/*
1599 			 * If the generation has changed, then someone else
1600 			 * tinkered with this page so we should
1601 			 * start over.
1602 			 */
1603 			uint16_t oldgen = gen;
1604 			gen = VM_PAGEMD_PVLIST_LOCK(mdpg, true);
1605 			if (gen != oldgen)
1606 				goto again;
1607 		}
1608 		npv = apv;
1609 		apv = NULL;
1610 		npv->pv_va = va;
1611 		npv->pv_pmap = pmap;
1612 		npv->pv_next = pv->pv_next;
1613 		pv->pv_next = npv;
1614 		PMAP_COUNT(mappings);
1615 	}
1616 	pmap_check_pvlist(pg);
1617 	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
1618 	if (__predict_false(apv != NULL))
1619 		pmap_pv_free(apv);
1620 
1621 	UVMHIST_LOG(pmaphist, "<- done pv=%p%s",
1622 	    pv, first ? " (first pv)" : "",0,0);
1623 }
1624 
1625 /*
1626  * Remove a physical to virtual address translation.
1627  * If cache was inhibited on this page, and there are no more cache
1628  * conflicts, restore caching.
1629  * Flush the cache if the last page is removed (should always be cached
1630  * at this point).
1631  */
1632 void
1633 pmap_remove_pv(pmap_t pmap, vaddr_t va, struct vm_page *pg, bool dirty)
1634 {
1635 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
1636 	pv_entry_t pv, npv;
1637 	bool last;
1638 
1639 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
1640 	UVMHIST_LOG(pmaphist,
1641 	    "(pmap=%p va=%#"PRIxVADDR" pg=%p (pa %#"PRIxPADDR")\n",
1642 	    pmap, va, pg, VM_PAGE_TO_PHYS(pg));
1643 	UVMHIST_LOG(pmaphist, "dirty=%s)", dirty ? "true" : "false", 0,0,0);
1644 
1645 	KASSERT(kpreempt_disabled());
1646 	pv = &mdpg->mdpg_first;
1647 
1648 	(void)VM_PAGEMD_PVLIST_LOCK(mdpg, true);
1649 	pmap_check_pvlist(pg);
1650 
1651 	/*
1652 	 * If it is the first entry on the list, it is actually
1653 	 * in the header and we must copy the following entry up
1654 	 * to the header.  Otherwise we must search the list for
1655 	 * the entry.  In either case we free the now unused entry.
1656 	 */
1657 
1658 	last = false;
1659 	if (pmap == pv->pv_pmap && va == pv->pv_va) {
1660 		npv = pv->pv_next;
1661 		if (npv) {
1662 			*pv = *npv;
1663 			KASSERT(pv->pv_pmap != NULL);
1664 		} else {
1665 #ifdef __PMAP_VIRTUAL_CACHE_ALIASES
1666 			pmap_page_clear_attributes(mdpg, VM_PAGEMD_UNCACHED);
1667 #endif
1668 			pv->pv_pmap = NULL;
1669 			last = true;	/* Last mapping removed */
1670 		}
1671 		PMAP_COUNT(remove_pvfirst);
1672 	} else {
1673 		for (npv = pv->pv_next; npv; pv = npv, npv = npv->pv_next) {
1674 			PMAP_COUNT(remove_pvsearch);
1675 			if (pmap == npv->pv_pmap && va == npv->pv_va)
1676 				break;
1677 		}
1678 		if (npv) {
1679 			pv->pv_next = npv->pv_next;
1680 		}
1681 	}
1682 	pmap_md_vca_remove(pg, va);
1683 
1684 	pmap_check_pvlist(pg);
1685 	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
1686 
1687 	/*
1688 	 * Free the pv_entry if needed.
1689 	 */
1690 	if (npv)
1691 		pmap_pv_free(npv);
1692 	if (VM_PAGEMD_EXECPAGE_P(mdpg) && dirty) {
1693 		if (last) {
1694 			/*
1695 			 * If this was the page's last mapping, we no longer
1696 			 * care about its execness.
1697 			 */
1698 			UVMHIST_LOG(pmapexechist,
1699 			    "pg %p (pa %#"PRIxPADDR")%s: %s",
1700 			    pg, VM_PAGE_TO_PHYS(pg),
1701 			    last ? " [last mapping]" : "",
1702 			    "execpage cleared");
1703 			pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
1704 			PMAP_COUNT(exec_uncached_remove);
1705 		} else {
1706 			/*
1707 			 * Someone still has it mapped as an executable page
1708 			 * so we must sync it.
1709 			 */
1710 			UVMHIST_LOG(pmapexechist,
1711 			    "pg %p (pa %#"PRIxPADDR")%s: %s",
1712 			    pg, VM_PAGE_TO_PHYS(pg),
1713 			    last ? " [last mapping]" : "",
1714 			    "performed syncicache");
1715 			pmap_page_syncicache(pg);
1716 			PMAP_COUNT(exec_synced_remove);
1717 		}
1718 	}
1719 	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
1720 }
1721 
1722 #if defined(MULTIPROCESSOR)
1723 struct pmap_pvlist_info {
1724 	kmutex_t *pli_locks[PAGE_SIZE / 32];
1725 	volatile u_int pli_lock_refs[PAGE_SIZE / 32];
1726 	volatile u_int pli_lock_index;
1727 	u_int pli_lock_mask;
1728 } pmap_pvlist_info;
1729 
1730 void
1731 pmap_pvlist_lock_init(size_t cache_line_size)
1732 {
1733 	struct pmap_pvlist_info * const pli = &pmap_pvlist_info;
1734 	const vaddr_t lock_page = uvm_pageboot_alloc(PAGE_SIZE);
1735 	vaddr_t lock_va = lock_page;
1736 	if (sizeof(kmutex_t) > cache_line_size) {
1737 		cache_line_size = roundup2(sizeof(kmutex_t), cache_line_size);
1738 	}
1739 	const size_t nlocks = PAGE_SIZE / cache_line_size;
1740 	KASSERT((nlocks & (nlocks - 1)) == 0);
1741 	/*
1742 	 * Now divide the page into a number of mutexes, one per cacheline.
1743 	 */
1744 	for (size_t i = 0; i < nlocks; lock_va += cache_line_size, i++) {
1745 		kmutex_t * const lock = (kmutex_t *)lock_va;
1746 		mutex_init(lock, MUTEX_DEFAULT, IPL_VM);
1747 		pli->pli_locks[i] = lock;
1748 	}
1749 	pli->pli_lock_mask = nlocks - 1;
1750 }
1751 
1752 uint16_t
1753 pmap_pvlist_lock(struct vm_page_md *mdpg, bool list_change)
1754 {
1755 	struct pmap_pvlist_info * const pli = &pmap_pvlist_info;
1756 	kmutex_t *lock = mdpg->mdpg_lock;
1757 	uint16_t gen;
1758 
1759 	/*
1760 	 * Allocate a lock on an as-needed basis.  This will hopefully give us
1761 	 * semi-random distribution not based on page color.
1762 	 */
1763 	if (__predict_false(lock == NULL)) {
1764 		size_t locknum = atomic_add_int_nv(&pli->pli_lock_index, 37);
1765 		size_t lockid = locknum & pli->pli_lock_mask;
1766 		kmutex_t * const new_lock = pli->pli_locks[lockid];
1767 		/*
1768 		 * Set the lock.  If some other thread already did, just use
1769 		 * the one they assigned.
1770 		 */
1771 		lock = atomic_cas_ptr(&mdpg->mdpg_lock, NULL, new_lock);
1772 		if (lock == NULL) {
1773 			lock = new_lock;
1774 			atomic_inc_uint(&pli->pli_lock_refs[lockid]);
1775 		}
1776 	}
1777 
1778 	/*
1779 	 * Now finally lock the pvlists.
1780 	 */
1781 	mutex_spin_enter(lock);
1782 
1783 	/*
1784 	 * If the locker will be changing the list, increment the high 16 bits
1785 	 * of attrs so we use that as a generation number.
1786 	 */
1787 	gen = VM_PAGEMD_PVLIST_GEN(mdpg);		/* get old value */
1788 	if (list_change)
1789 		atomic_add_int(&mdpg->mdpg_attrs, 0x10000);
1790 
1791 	/*
1792 	 * Return the generation number.
1793 	 */
1794 	return gen;
1795 }
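
/*
 * Callers use the returned generation number to detect pv-list changes
 * across a lock release, for example (sketch):
 *
 *	gen = VM_PAGEMD_PVLIST_LOCK(mdpg, false);
 *	...
 *	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
 *	... allocate or sleep ...
 *	if (gen != VM_PAGEMD_PVLIST_LOCK(mdpg, false))
 *		... the list changed, restart the walk ...
 *
 * pmap_enter_pv() and pmap_clear_modify() above both follow this
 * pattern.
 */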
1796 #else /* !MULTIPROCESSOR */
1797 void
1798 pmap_pvlist_lock_init(size_t cache_line_size)
1799 {
1800 	mutex_init(&pmap_pvlist_mutex, MUTEX_DEFAULT, IPL_VM);
1801 }
1802 
1803 #ifdef MODULAR
1804 uint16_t
1805 pmap_pvlist_lock(struct vm_page_md *mdpg, bool list_change)
1806 {
1807 	/*
1808 	 * We just use a global lock.
1809 	 */
1810 	if (__predict_false(mdpg->mdpg_lock == NULL)) {
1811 		mdpg->mdpg_lock = &pmap_pvlist_mutex;
1812 	}
1813 
1814 	/*
1815 	 * Now finally lock the pvlists.
1816 	 */
1817 	mutex_spin_enter(mdpg->mdpg_lock);
1818 
1819 	return 0;
1820 }
1821 #endif /* MODULAR */
1822 #endif /* !MULTIPROCESSOR */
1823 
1824 /*
1825  * pmap_pv_page_alloc:
1826  *
1827  *	Allocate a page for the pv_entry pool.
1828  */
1829 void *
1830 pmap_pv_page_alloc(struct pool *pp, int flags)
1831 {
1832 	struct vm_page *pg = PMAP_ALLOC_POOLPAGE(UVM_PGA_USERESERVE);
1833 	if (pg == NULL)
1834 		return NULL;
1835 
1836 	return (void *)pmap_map_poolpage(VM_PAGE_TO_PHYS(pg));
1837 }
1838 
1839 /*
1840  * pmap_pv_page_free:
1841  *
1842  *	Free a pv_entry pool page.
1843  */
1844 void
1845 pmap_pv_page_free(struct pool *pp, void *v)
1846 {
1847 	vaddr_t va = (vaddr_t)v;
1848 
1849 	KASSERT(pmap_md_direct_mapped_vaddr_p(va));
1850 	const paddr_t pa = pmap_md_direct_mapped_vaddr_to_paddr(va);
1851 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
1852 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
1853 	pmap_md_vca_remove(pg, va);
1854 	pmap_page_clear_attributes(mdpg, VM_PAGEMD_POOLPAGE);
1855 	uvm_pagefree(pg);
1856 }
1857 
1858 #ifdef PMAP_PREFER
1859 /*
1860  * Find first virtual address >= *vap that doesn't cause
1861  * a cache alias conflict.
1862  */
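/*
 * Worked example with hypothetical numbers (td == 0, the round-up
 * case): with 4KB pages and prefer_mask == 0x3fff (a 16KB alias
 * boundary), foff == 0x5000 and *vap == 0x20c000 give
 * d == (0x5000 - 0x20c000) & 0x3fff == 0x1000, so *vap is advanced to
 * 0x20d000, which now shares alias bits 0x1000 with foff.
 */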
1863 void
1864 pmap_prefer(vaddr_t foff, vaddr_t *vap, vsize_t sz, int td)
1865 {
1866 	vaddr_t	va;
1867 	vsize_t d;
1868 	vsize_t prefer_mask = ptoa(uvmexp.colormask);
1869 
1870 	PMAP_COUNT(prefer_requests);
1871 
1872 	prefer_mask |= pmap_md_cache_prefer_mask();
1873 
1874 	if (prefer_mask) {
1875 		va = *vap;
1876 
1877 		d = foff - va;
1878 		d &= prefer_mask;
1879 		if (d) {
1880 			if (td)
1881 				*vap = trunc_page(va -((-d) & prefer_mask));
1882 			else
1883 				*vap = round_page(va + d);
1884 			PMAP_COUNT(prefer_adjustments);
1885 		}
1886 	}
1887 }
1888 #endif /* PMAP_PREFER */
1889 
1890 #ifdef PMAP_MAP_POOLPAGE
1891 vaddr_t
1892 pmap_map_poolpage(paddr_t pa)
1893 {
1894 
1895 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
1896 	KASSERT(pg);
1897 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
1898 	pmap_page_set_attributes(mdpg, VM_PAGEMD_POOLPAGE);
1899 
1900 	const vaddr_t va = pmap_md_map_poolpage(pa, NBPG);
1901 	pmap_md_vca_add(pg, va, NULL);
1902 	return va;
1903 }
1904 
1905 paddr_t
1906 pmap_unmap_poolpage(vaddr_t va)
1907 {
1908 
1909 	KASSERT(pmap_md_direct_mapped_vaddr_p(va));
1910 	paddr_t pa = pmap_md_direct_mapped_vaddr_to_paddr(va);
1911 
1912 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
1913 	KASSERT(pg);
1914 	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
1915 	pmap_page_clear_attributes(mdpg, VM_PAGEMD_POOLPAGE);
1916 	pmap_md_unmap_poolpage(va, NBPG);
1917 	pmap_md_vca_remove(pg, va);
1918 
1919 	return pa;
1920 }
1921 #endif /* PMAP_MAP_POOLPAGE */
1922