/*	$NetBSD: pmap_tlb.c,v 1.44 2021/05/04 09:05:34 skrll Exp $	*/

/*-
 * Copyright (c) 2010 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas at 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: pmap_tlb.c,v 1.44 2021/05/04 09:05:34 skrll Exp $");

/*
 * Manages address spaces in a TLB.
 *
 * Normally there is a 1:1 mapping between a TLB and a CPU.  However, some
 * implementations may share a TLB between multiple CPUs (really CPU thread
 * contexts).  This requires the TLB abstraction to be separated from the
 * CPU abstraction.  It also requires that the TLB be locked while doing
 * TLB activities.
 *
 * For each TLB, we track the ASIDs in use in a bitmap and a list of pmaps
 * that have a valid ASID.
 *
 * We allocate ASIDs in increasing order until we have exhausted the supply,
 * then reinitialize the ASID space, and start allocating again at 1.  When
 * allocating from the ASID bitmap, we skip any ASID that has a corresponding
 * bit set in the ASID bitmap.  Eventually this causes the ASID bitmap to fill
 * and, when completely filled, a reinitialization of the ASID space.
 *
 * To reinitialize the ASID space, the ASID bitmap is reset and then the ASIDs
 * of non-kernel TLB entries get recorded in the ASID bitmap.  If the entries
 * in the TLB consume more than half of the ASID space, all ASIDs are
 * invalidated, the ASID bitmap is recleared, and the list of pmaps is
 * emptied.  Otherwise (the normal case), any ASID present in the TLB (even
 * those which are no longer used by a pmap) will remain active (allocated)
 * and all other ASIDs will be freed.  If the size of the TLB is much smaller
 * than the ASID space, this algorithm completely avoids TLB invalidation.
 *
 * For multiprocessors, we also have to deal with TLB invalidation requests
 * from other CPUs, some of which are handled by reinitializing the ASID
 * space.  Whereas above we keep the ASIDs of those pmaps which have active
 * TLB entries, this type of reinitialization preserves the ASIDs of any
 * "onproc" user pmap and all other ASIDs will be freed.  We must do this
 * since we can't change the current ASID.
 *
 * Each pmap has two bitmaps: pm_active and pm_onproc.  Each bit in pm_active
 * indicates whether that pmap has an allocated ASID for a CPU.  Each bit in
 * pm_onproc indicates that the pmap's ASID is in use, i.e. a CPU has it in
 * its "current ASID" field, e.g. the ASID field of the COP 0 register EntryHi
 * for MIPS, or the ASID field of TTBR0 for AA64.  The bit number used in
 * these bitmaps comes from the CPU's cpu_index().  Even though these bitmaps
 * contain the bits for all CPUs, the bits that correspond to the CPUs sharing
 * a TLB can only be manipulated while holding that TLB's lock.  Atomic ops
 * must be used to update them since multiple CPUs may be changing different
 * sets of bits at the same time but these sets never overlap.
 *
 * When a change to the local TLB may require a change in the TLBs of other
 * CPUs, we try to avoid sending an IPI if at all possible.  For instance, if
 * we are updating a PTE and that PTE previously was invalid and therefore
 * couldn't support an active mapping, there's no need for an IPI since there
 * can't be a TLB entry to invalidate.  The other case is when we change a
 * PTE to be modified: we just update the local TLB.  If another TLB has a
 * stale entry, a TLB MOD exception will be raised and that will cause the
 * local TLB to be updated.
 *
 * We never need to update a non-local TLB if the pmap doesn't have a valid
 * ASID for that TLB.  If it does have a valid ASID but isn't currently
 * "onproc", we simply reset its ASID for that TLB and then when it goes
 * "onproc" it will allocate a new ASID and any existing TLB entries will be
 * orphaned.  Only in the case that the pmap has an "onproc" ASID do we
 * actually have to send an IPI.
 *
 * Once we have determined we must send an IPI to shoot down a TLB, we need
 * to send it to one of the CPUs that share that TLB.  We choose the lowest
 * numbered CPU that has one of the pmap's ASIDs "onproc".  In reality, any
 * CPU sharing that TLB would do, but interrupting an active CPU seems best.
 *
 * A TLB might have multiple shootdowns active concurrently.  The shootdown
 * logic compresses these into a few cases:
 *	0) nobody needs to have its TLB entries invalidated
 *	1) one ASID needs to have its TLB entries invalidated
 *	2) more than one ASID needs to have its TLB entries invalidated
 *	3) the kernel needs to have its TLB entries invalidated
 *	4) the kernel and one or more ASIDs need their TLB entries invalidated.
 *
 * And for each case we do:
 *	0) nothing,
 *	1) if that ASID is still "onproc", we invalidate the TLB entries for
 *	   that single ASID.  If not, just reset the pmap's ASID to invalidate
 *	   and let it allocate a new ASID the next time it goes "onproc",
 *	2) we reinitialize the ASID space (preserving any "onproc" ASIDs) and
 *	   invalidate all non-wired non-global TLB entries,
 *	3) we invalidate all of the non-wired global TLB entries,
 *	4) we reinitialize the ASID space (again preserving any "onproc"
 *	   ASIDs) and invalidate all non-wired TLB entries.
 *
 * As you can see, shootdowns are not concerned with addresses, just address
 * spaces.  Since the number of TLB entries is usually quite small, this
 * avoids a lot of overhead for not much gain.
 */
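
/*
 * An illustrative, self-contained sketch of the allocate / exhaust /
 * reinitialize cycle described above (not part of the kernel build; the
 * names ASID_MAX, asid_bitmap, asid_hint, and asid_alloc are invented for
 * this example).  The real code keeps this state in struct pmap_tlb_info
 * and scans a word-sized bitmap instead of a byte array:
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *
 *	#define ASID_MAX	255		// e.g. an 8-bit ASID field
 *	static uint8_t asid_bitmap[ASID_MAX + 1];	// 1 = in use
 *	static unsigned asid_hint = 1;	// ASID 0 belongs to the kernel
 *
 *	static unsigned
 *	asid_alloc(void)
 *	{
 *		// Skip any ASID whose bit is already set.
 *		for (unsigned a = asid_hint; a <= ASID_MAX; a++) {
 *			if (!asid_bitmap[a]) {
 *				asid_bitmap[a] = 1;
 *				asid_hint = a + 1;
 *				return a;
 *			}
 *		}
 *		// Exhausted: reinitialize the ASID space and restart at 1.
 *		// (The real code would also re-record, or invalidate, the
 *		// ASIDs still present in the TLB at this point.)
 *		memset(asid_bitmap, 0, sizeof(asid_bitmap));
 *		asid_bitmap[0] = 1;
 *		asid_bitmap[1] = 1;
 *		asid_hint = 2;
 *		return 1;
 *	}
 */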

#define __PMAP_PRIVATE

#include "opt_multiprocessor.h"

#include <sys/param.h>

#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/kernel.h>			/* for cold */
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>

#include <uvm/uvm.h>

static kmutex_t pmap_tlb0_lock __cacheline_aligned;

#define	IFCONSTANT(x)	(__builtin_constant_p((x)) ? (x) : 0)

#if KERNEL_PID > 31
#error "KERNEL_PID expected in range 0-31"
#endif

#define	TLBINFO_ASID_MARK_UNUSED(ti, asid) \
	__BITMAP_CLR((asid), &(ti)->ti_asid_bitmap)
#define	TLBINFO_ASID_MARK_USED(ti, asid) \
	__BITMAP_SET((asid), &(ti)->ti_asid_bitmap)
#define	TLBINFO_ASID_INUSE_P(ti, asid) \
	__BITMAP_ISSET((asid), &(ti)->ti_asid_bitmap)
#define	TLBINFO_ASID_RESET(ti) \
	do {								\
		__BITMAP_ZERO(&ti->ti_asid_bitmap);			\
		for (tlb_asid_t asid = 0; asid <= KERNEL_PID; asid++) 	\
			TLBINFO_ASID_MARK_USED(ti, asid);	 	\
	} while (0)
#define	TLBINFO_ASID_INITIAL_FREE(asid_max) \
	(asid_max + 1 /* 0 */ - (1 + KERNEL_PID))
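
/*
 * For example, with KERNEL_PID 0 and an 8-bit ASID space (asid_max 255),
 * TLBINFO_ASID_INITIAL_FREE(255) = 255 + 1 - (1 + 0) = 255: every ASID
 * except ASID 0, which is reserved for the kernel, starts out free.
 */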

struct pmap_tlb_info pmap_tlb0_info = {
	.ti_name = "tlb0",
	.ti_asid_hint = KERNEL_PID + 1,
#ifdef PMAP_TLB_NUM_PIDS
	.ti_asid_max = IFCONSTANT(PMAP_TLB_NUM_PIDS - 1),
	.ti_asids_free = IFCONSTANT(
		TLBINFO_ASID_INITIAL_FREE(PMAP_TLB_NUM_PIDS - 1)),
#endif
	.ti_asid_bitmap._b[0] = __BITS(0, KERNEL_PID),
#ifdef PMAP_TLB_WIRED_UPAGES
	.ti_wired = PMAP_TLB_WIRED_UPAGES,
#endif
	.ti_lock = &pmap_tlb0_lock,
	.ti_pais = LIST_HEAD_INITIALIZER(pmap_tlb0_info.ti_pais),
#if defined(MULTIPROCESSOR) && PMAP_TLB_MAX > 1
	.ti_tlbinvop = TLBINV_NOBODY,
#endif
};

#undef IFCONSTANT

#if defined(MULTIPROCESSOR) && PMAP_TLB_MAX > 1
struct pmap_tlb_info *pmap_tlbs[PMAP_TLB_MAX] = {
	[0] = &pmap_tlb0_info,
};
u_int pmap_ntlbs = 1;
#endif

#ifdef MULTIPROCESSOR
__unused static inline bool
pmap_tlb_intersecting_active_p(pmap_t pm, struct pmap_tlb_info *ti)
{
#if PMAP_TLB_MAX == 1
	return !kcpuset_iszero(pm->pm_active);
#else
	return kcpuset_intersecting_p(pm->pm_active, ti->ti_kcpuset);
#endif
}

static inline bool
pmap_tlb_intersecting_onproc_p(pmap_t pm, struct pmap_tlb_info *ti)
{
#if PMAP_TLB_MAX == 1
	return !kcpuset_iszero(pm->pm_onproc);
#else
	return kcpuset_intersecting_p(pm->pm_onproc, ti->ti_kcpuset);
#endif
}
#endif

static void
pmap_tlb_pai_check(struct pmap_tlb_info *ti, bool locked_p)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(maphist, "(ti=%#jx)", (uintptr_t)ti, 0, 0, 0);

#ifdef DIAGNOSTIC
	struct pmap_asid_info *pai;
	if (!locked_p)
		TLBINFO_LOCK(ti);
	LIST_FOREACH(pai, &ti->ti_pais, pai_link) {
		KASSERT(pai != NULL);
		KASSERT(PAI_PMAP(pai, ti) != pmap_kernel());
		KASSERT(pai->pai_asid > KERNEL_PID);
		KASSERTMSG(pai->pai_asid <= ti->ti_asid_max,
		    "pm %p asid %#x", PAI_PMAP(pai, ti), pai->pai_asid);
		KASSERTMSG(TLBINFO_ASID_INUSE_P(ti, pai->pai_asid),
		    "pm %p asid %u", PAI_PMAP(pai, ti), pai->pai_asid);
#ifdef MULTIPROCESSOR
		KASSERT(pmap_tlb_intersecting_active_p(PAI_PMAP(pai, ti), ti));
#endif
	}
	if (!locked_p)
		TLBINFO_UNLOCK(ti);
#endif
	UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0);
}

static void
pmap_tlb_pai_reset(struct pmap_tlb_info *ti, struct pmap_asid_info *pai,
	struct pmap *pm)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(maphist, "(ti=%#jx, pai=%#jx, pm=%#jx): asid %u",
	    (uintptr_t)ti, (uintptr_t)pai, (uintptr_t)pm, pai->pai_asid);

	/*
	 * We must have an ASID but it must not be onproc (on a processor).
	 */
	KASSERT(pai->pai_asid > KERNEL_PID);
	KASSERT(pai->pai_asid <= ti->ti_asid_max);
#if defined(MULTIPROCESSOR)
	KASSERT(pmap_tlb_intersecting_active_p(pm, ti));
	KASSERT(!pmap_tlb_intersecting_onproc_p(pm, ti));
#endif
	LIST_REMOVE(pai, pai_link);
#ifdef DIAGNOSTIC
	pai->pai_link.le_prev = NULL;	/* tagged as unlinked */
#endif
	/*
	 * If the platform has a cheap way to flush ASIDs then free the ASID
	 * back into the pool.  On multiprocessor systems, we will flush the
	 * ASID from the TLB when it's allocated.  That way we know the flush
	 * was always done in the correct TLB space.  On uniprocessor systems,
	 * just do the flush now since we know that it has been used.  This has
	 * a bit less overhead.  Either way, this will mean that we will only
	 * need to flush all ASIDs if all ASIDs are in use and we need to
	 * allocate a new one.
	 */
	if (PMAP_TLB_FLUSH_ASID_ON_RESET) {
#ifndef MULTIPROCESSOR
		UVMHIST_LOG(maphist, " ... asid %u flushed", pai->pai_asid, 0,
		    0, 0);
		tlb_invalidate_asids(pai->pai_asid, pai->pai_asid);
#endif
		if (TLBINFO_ASID_INUSE_P(ti, pai->pai_asid)) {
			UVMHIST_LOG(maphist, " ... asid marked unused",
			    pai->pai_asid, 0, 0, 0);
			TLBINFO_ASID_MARK_UNUSED(ti, pai->pai_asid);
			ti->ti_asids_free++;
		}
	}
	/*
	 * Note that we don't mark the ASID as not in use in the TLB's ASID
	 * bitmap (thus it can't be allocated until the ASID space is exhausted
	 * and therefore reinitialized).  We don't want to flush the TLB for
	 * entries belonging to this ASID so we will let natural TLB entry
	 * replacement flush them out of the TLB.  Any new entries for this
	 * pmap will need a new ASID allocated.
	 */
	pai->pai_asid = 0;

#if defined(MULTIPROCESSOR)
	/*
	 * The bits in pm_active belonging to this TLB can only be changed
	 * while this TLB's lock is held.
	 */
#if PMAP_TLB_MAX == 1
	kcpuset_zero(pm->pm_active);
#else
	kcpuset_remove(pm->pm_active, ti->ti_kcpuset);
#endif
	KASSERT(!pmap_tlb_intersecting_active_p(pm, ti));
#endif /* MULTIPROCESSOR */

	UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0);
}

void
pmap_tlb_info_evcnt_attach(struct pmap_tlb_info *ti)
{
#if defined(MULTIPROCESSOR) && !defined(PMAP_TLB_NO_SYNCI_EVCNT)
	evcnt_attach_dynamic_nozero(&ti->ti_evcnt_synci_desired,
	    EVCNT_TYPE_MISC, NULL,
	    ti->ti_name, "icache syncs desired");
	evcnt_attach_dynamic_nozero(&ti->ti_evcnt_synci_asts,
	    EVCNT_TYPE_MISC, &ti->ti_evcnt_synci_desired,
	    ti->ti_name, "icache sync asts");
	evcnt_attach_dynamic_nozero(&ti->ti_evcnt_synci_all,
	    EVCNT_TYPE_MISC, &ti->ti_evcnt_synci_asts,
	    ti->ti_name, "icache full syncs");
	evcnt_attach_dynamic_nozero(&ti->ti_evcnt_synci_pages,
	    EVCNT_TYPE_MISC, &ti->ti_evcnt_synci_asts,
	    ti->ti_name, "icache pages synced");
	evcnt_attach_dynamic_nozero(&ti->ti_evcnt_synci_duplicate,
	    EVCNT_TYPE_MISC, &ti->ti_evcnt_synci_desired,
	    ti->ti_name, "icache dup pages skipped");
	evcnt_attach_dynamic_nozero(&ti->ti_evcnt_synci_deferred,
	    EVCNT_TYPE_MISC, &ti->ti_evcnt_synci_desired,
	    ti->ti_name, "icache pages deferred");
#endif /* MULTIPROCESSOR && !PMAP_TLB_NO_SYNCI_EVCNT */
	evcnt_attach_dynamic_nozero(&ti->ti_evcnt_asid_reinits,
	    EVCNT_TYPE_MISC, NULL,
	    ti->ti_name, "asid pool reinit");
}

void
pmap_tlb_info_init(struct pmap_tlb_info *ti)
{
#if defined(MULTIPROCESSOR)
#if PMAP_TLB_MAX == 1
	KASSERT(ti == &pmap_tlb0_info);
#else
	if (ti != &pmap_tlb0_info) {
		KASSERT(pmap_ntlbs < PMAP_TLB_MAX);

		KASSERT(pmap_tlbs[pmap_ntlbs] == NULL);

		ti->ti_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
		TLBINFO_ASID_RESET(ti);
		ti->ti_asid_hint = KERNEL_PID + 1;
		ti->ti_asid_max = pmap_tlbs[0]->ti_asid_max;
		ti->ti_asids_free = TLBINFO_ASID_INITIAL_FREE(ti->ti_asid_max);
		ti->ti_tlbinvop = TLBINV_NOBODY;
		ti->ti_victim = NULL;
		kcpuset_create(&ti->ti_kcpuset, true);
		ti->ti_index = pmap_ntlbs++;
		ti->ti_wired = 0;
		pmap_tlbs[ti->ti_index] = ti;
		snprintf(ti->ti_name, sizeof(ti->ti_name), "tlb%u",
		    ti->ti_index);
		pmap_tlb_info_evcnt_attach(ti);

		KASSERT(ti->ti_asid_max < PMAP_TLB_BITMAP_LENGTH);
		return;
	}
#endif
#endif /* MULTIPROCESSOR */
	KASSERT(ti == &pmap_tlb0_info);
	KASSERT(ti->ti_lock == &pmap_tlb0_lock);

	mutex_init(ti->ti_lock, MUTEX_DEFAULT, IPL_SCHED);
#if defined(MULTIPROCESSOR) && PMAP_TLB_MAX > 1
	kcpuset_create(&ti->ti_kcpuset, true);
	kcpuset_set(ti->ti_kcpuset, cpu_index(curcpu()));
#endif

	if (ti->ti_asid_max == 0) {
		ti->ti_asid_max = pmap_md_tlb_asid_max();
		ti->ti_asids_free = TLBINFO_ASID_INITIAL_FREE(ti->ti_asid_max);
	}

	KASSERT(ti->ti_asid_max < PMAP_TLB_BITMAP_LENGTH);
}

#if defined(MULTIPROCESSOR)
void
pmap_tlb_info_attach(struct pmap_tlb_info *ti, struct cpu_info *ci)
{
	KASSERT(!CPU_IS_PRIMARY(ci));
	KASSERT(ci->ci_data.cpu_idlelwp != NULL);
	KASSERT(cold);

	TLBINFO_LOCK(ti);
#if PMAP_TLB_MAX > 1
	kcpuset_set(ti->ti_kcpuset, cpu_index(ci));
	cpu_set_tlb_info(ci, ti);
#endif

	/*
	 * Do any MD tlb info init.
	 */
	pmap_md_tlb_info_attach(ti, ci);

	/*
	 * The kernel pmap uses the kcpuset_running set so it's always
	 * up-to-date.
	 */
	TLBINFO_UNLOCK(ti);
}
#endif /* MULTIPROCESSOR */

#ifdef DIAGNOSTIC
static size_t
pmap_tlb_asid_count(struct pmap_tlb_info *ti)
{
	size_t count = 0;
	for (tlb_asid_t asid = 1; asid <= ti->ti_asid_max; asid++) {
		if (TLBINFO_ASID_INUSE_P(ti, asid))
			count++;
	}
	return count;
}
#endif

static void
pmap_tlb_asid_reinitialize(struct pmap_tlb_info *ti, enum tlb_invalidate_op op)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(maphist, "(ti=%#jx, op=%ju)", (uintptr_t)ti, op, 0, 0);

	pmap_tlb_pai_check(ti, true);

	ti->ti_evcnt_asid_reinits.ev_count++;

	/*
	 * First, clear the ASID bitmap (except for the kernel ASIDs
	 * 0..KERNEL_PID, which stay marked as in use).
	 */
	ti->ti_asids_free = TLBINFO_ASID_INITIAL_FREE(ti->ti_asid_max);
	ti->ti_asid_hint = KERNEL_PID + 1;
	TLBINFO_ASID_RESET(ti);

	switch (op) {
#if defined(MULTIPROCESSOR) && defined(PMAP_TLB_NEED_SHOOTDOWN)
	case TLBINV_ALL:
		tlb_invalidate_all();
		break;
	case TLBINV_ALLUSER:
		tlb_invalidate_asids(KERNEL_PID + 1, ti->ti_asid_max);
		break;
#endif /* MULTIPROCESSOR && PMAP_TLB_NEED_SHOOTDOWN */
	case TLBINV_NOBODY: {
		/*
		 * If we are just reclaiming ASIDs in the TLB, let's go find
		 * what ASIDs are in use in the TLB.  Since this is a
		 * semi-expensive operation, we don't want to do it too often.
		 * So if more than half of the ASIDs are in use, we don't have
		 * enough free ASIDs so invalidate the TLB entries with ASIDs
		 * and clear the ASID bitmap.  That will force everyone to
		 * allocate a new ASID.
		 */
#if !defined(MULTIPROCESSOR) || defined(PMAP_TLB_NEED_SHOOTDOWN)
		pmap_tlb_asid_check();
		const u_int asids_found = tlb_record_asids(
		    ti->ti_asid_bitmap._b, ti->ti_asid_max);
		pmap_tlb_asid_check();
#ifdef DIAGNOSTIC
		const u_int asids_count = pmap_tlb_asid_count(ti);
#endif
		KASSERTMSG(asids_found == asids_count,
		    "found %u != count %u", asids_found, asids_count);
		if (__predict_false(asids_found >= ti->ti_asid_max / 2)) {
			tlb_invalidate_asids(KERNEL_PID + 1, ti->ti_asid_max);
#else /* MULTIPROCESSOR && !PMAP_TLB_NEED_SHOOTDOWN */
			/*
			 * For those systems (PowerPC) that don't require
			 * cross cpu TLB shootdowns, we have to invalidate the
			 * entire TLB because we can't record the ASIDs in use
			 * on the other CPUs.  This is hopefully cheaper than
			 * trying to use an IPI to record all the ASIDs on all
			 * the CPUs (which would be a synchronization
			 * nightmare).
			 */
			tlb_invalidate_all();
#endif /* MULTIPROCESSOR && !PMAP_TLB_NEED_SHOOTDOWN */
			TLBINFO_ASID_RESET(ti);
			ti->ti_asids_free = TLBINFO_ASID_INITIAL_FREE(
				ti->ti_asid_max);
#if !defined(MULTIPROCESSOR) || defined(PMAP_TLB_NEED_SHOOTDOWN)
		} else {
			ti->ti_asids_free -= asids_found;
		}
#endif /* !MULTIPROCESSOR || PMAP_TLB_NEED_SHOOTDOWN */
		KASSERTMSG(ti->ti_asids_free <= ti->ti_asid_max, "%u",
		    ti->ti_asids_free);
		break;
	}
	default:
		panic("%s: unexpected op %d", __func__, op);
	}

	/*
	 * Now go through the active ASIDs.  If the ASID is on a processor or
	 * we aren't invalidating all ASIDs and the TLB has an entry owned by
	 * that ASID, mark it as in use.  Otherwise release the ASID.
	 */
	struct pmap_asid_info *pai, *next;
	for (pai = LIST_FIRST(&ti->ti_pais); pai != NULL; pai = next) {
		struct pmap * const pm = PAI_PMAP(pai, ti);
		next = LIST_NEXT(pai, pai_link);
		KASSERT(pm != pmap_kernel());
		KASSERT(pai->pai_asid > KERNEL_PID);
#if defined(MULTIPROCESSOR)
		if (pmap_tlb_intersecting_onproc_p(pm, ti)) {
			if (!TLBINFO_ASID_INUSE_P(ti, pai->pai_asid)) {
				TLBINFO_ASID_MARK_USED(ti, pai->pai_asid);
				ti->ti_asids_free--;
			}
			continue;
		}
#endif /* MULTIPROCESSOR */
		if (TLBINFO_ASID_INUSE_P(ti, pai->pai_asid)) {
			KASSERT(op == TLBINV_NOBODY);
		} else {
			pmap_tlb_pai_reset(ti, pai, pm);
		}
	}
#ifdef DIAGNOSTIC
	size_t free_count __diagused = ti->ti_asid_max - pmap_tlb_asid_count(ti);
	KASSERTMSG(free_count == ti->ti_asids_free,
	    "bitmap error: %zu != %u", free_count, ti->ti_asids_free);
#endif
	UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0);
}
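
/*
 * A worked example of the TLBINV_NOBODY heuristic above: with an 8-bit
 * ASID space (ti_asid_max 255), if tlb_record_asids() finds 40 distinct
 * ASIDs in the TLB, 40 < 255 / 2 so those 40 stay allocated and all other
 * ASIDs are freed without any TLB invalidation.  If it finds 130, then
 * 130 >= 127, all user TLB entries are invalidated, and every pmap must
 * allocate a fresh ASID.
 */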

#if defined(MULTIPROCESSOR) && defined(PMAP_TLB_NEED_SHOOTDOWN)
#if PMAP_TLB_MAX == 1
#error shootdown not required for single TLB systems
#endif
void
pmap_tlb_shootdown_process(void)
{
	struct cpu_info * const ci = curcpu();
	struct pmap_tlb_info * const ti = cpu_tlb_info(ci);
#ifdef DIAGNOSTIC
	struct pmap * const pm = curlwp->l_proc->p_vmspace->vm_map.pmap;
#endif

	KASSERT(cpu_intr_p());
	KASSERTMSG(ci->ci_cpl >= IPL_SCHED, "%s: cpl (%d) < IPL_SCHED (%d)",
	    __func__, ci->ci_cpl, IPL_SCHED);

	TLBINFO_LOCK(ti);

	switch (ti->ti_tlbinvop) {
	case TLBINV_ONE: {
		/*
		 * We only need to invalidate one user ASID.
		 */
		struct pmap_asid_info * const pai = PMAP_PAI(ti->ti_victim, ti);
		KASSERT(ti->ti_victim != pmap_kernel());
		if (pmap_tlb_intersecting_onproc_p(ti->ti_victim, ti)) {
			/*
			 * The victim is an active pmap so we will just
			 * invalidate its TLB entries.
			 */
			KASSERT(pai->pai_asid > KERNEL_PID);
			pmap_tlb_asid_check();
			tlb_invalidate_asids(pai->pai_asid, pai->pai_asid);
			pmap_tlb_asid_check();
		} else if (pai->pai_asid) {
			/*
			 * The victim is no longer an active pmap for this TLB.
			 * So simply clear its ASID and when pmap_activate is
			 * next called for this pmap, it will allocate a new
			 * ASID.
			 */
			KASSERT(!pmap_tlb_intersecting_onproc_p(pm, ti));
			pmap_tlb_pai_reset(ti, pai, PAI_PMAP(pai, ti));
		}
		break;
	}
	case TLBINV_ALLUSER:
		/*
		 * Flush all user TLB entries.
		 */
		pmap_tlb_asid_reinitialize(ti, TLBINV_ALLUSER);
		break;
	case TLBINV_ALLKERNEL:
		/*
		 * We need to invalidate all global TLB entries.
		 */
		pmap_tlb_asid_check();
		tlb_invalidate_globals();
		pmap_tlb_asid_check();
		break;
	case TLBINV_ALL:
		/*
		 * Flush all the TLB entries (user and kernel).
		 */
		pmap_tlb_asid_reinitialize(ti, TLBINV_ALL);
		break;
	case TLBINV_NOBODY:
		/*
		 * Might be spurious or another SMT CPU sharing this TLB
		 * could have already done the work.
		 */
		break;
	}

	/*
	 * Indicate we are done with the shootdown event.
	 */
	ti->ti_victim = NULL;
	ti->ti_tlbinvop = TLBINV_NOBODY;
	TLBINFO_UNLOCK(ti);
}

/*
 * This state machine could be encoded into an array of integers but since all
 * the values fit in 3 bits, the 5 entry "table" fits in a 16 bit value which
 * can be loaded in a single instruction.
 */
#define	TLBINV_MAP(op, nobody, one, alluser, allkernel, all)	\
	((((   (nobody) << 3 * TLBINV_NOBODY)			\
	 | (      (one) << 3 * TLBINV_ONE)			\
	 | (  (alluser) << 3 * TLBINV_ALLUSER)			\
	 | ((allkernel) << 3 * TLBINV_ALLKERNEL)		\
	 | (      (all) << 3 * TLBINV_ALL)) >> 3 * (op)) & 7)

#define	TLBINV_USER_MAP(op)	\
	TLBINV_MAP(op, TLBINV_ONE, TLBINV_ALLUSER, TLBINV_ALLUSER,	\
	    TLBINV_ALL, TLBINV_ALL)

#define	TLBINV_KERNEL_MAP(op)	\
	TLBINV_MAP(op, TLBINV_ALLKERNEL, TLBINV_ALL, TLBINV_ALL,	\
	    TLBINV_ALLKERNEL, TLBINV_ALL)
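
/*
 * For example, expanding the maps above: a user shootdown arriving when
 * nothing is pending upgrades TLBINV_NOBODY to TLBINV_ONE, and a second
 * one upgrades that to TLBINV_ALLUSER; a kernel shootdown on top of a
 * pending user shootdown yields TLBINV_ALL:
 *
 *	TLBINV_USER_MAP(TLBINV_NOBODY)    == TLBINV_ONE
 *	TLBINV_USER_MAP(TLBINV_ONE)       == TLBINV_ALLUSER
 *	TLBINV_KERNEL_MAP(TLBINV_NOBODY)  == TLBINV_ALLKERNEL
 *	TLBINV_KERNEL_MAP(TLBINV_ALLUSER) == TLBINV_ALL
 */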

bool
pmap_tlb_shootdown_bystanders(pmap_t pm)
{
	/*
	 * We don't need to deal with our own TLB.
	 */

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(maphist, "pm %#jx", (uintptr_t)pm, 0, 0, 0);

	const struct cpu_info * const ci = curcpu();
	kcpuset_t *pm_active = ci->ci_shootdowncpus;
	kcpuset_copy(pm_active, pm->pm_active);
	kcpuset_remove(pm_active, cpu_tlb_info(curcpu())->ti_kcpuset);
	const bool kernel_p = (pm == pmap_kernel());
	bool ipi_sent = false;

	/*
	 * If pm_active gains more set bits, that happens after all our
	 * changes have been made, so those CPUs will already see them.
	 */

	for (size_t i = 0; !kcpuset_iszero(pm_active); i++) {
		KASSERT(i < pmap_ntlbs);
		struct pmap_tlb_info * const ti = pmap_tlbs[i];
		KASSERT(tlbinfo_index(ti) == i);
		/*
		 * Skip this TLB if there are no active mappings for it.
		 */
		if (!kcpuset_intersecting_p(pm_active, ti->ti_kcpuset))
			continue;
		struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);
		kcpuset_remove(pm_active, ti->ti_kcpuset);
		TLBINFO_LOCK(ti);
		cpuid_t j = kcpuset_ffs_intersecting(pm->pm_onproc,
		    ti->ti_kcpuset);
		// post decrement since ffs returns bit + 1 or 0 if no bit
		if (j-- > 0) {
			if (kernel_p) {
				ti->ti_tlbinvop =
				    TLBINV_KERNEL_MAP(ti->ti_tlbinvop);
				ti->ti_victim = NULL;
			} else {
				KASSERT(pai->pai_asid);
				if (__predict_false(ti->ti_victim == pm)) {
					KASSERT(ti->ti_tlbinvop == TLBINV_ONE);
					/*
					 * We still need to invalidate this one
					 * ASID so there's nothing to change.
					 */
				} else {
					ti->ti_tlbinvop =
					    TLBINV_USER_MAP(ti->ti_tlbinvop);
					if (ti->ti_tlbinvop == TLBINV_ONE)
						ti->ti_victim = pm;
					else
						ti->ti_victim = NULL;
				}
			}
			TLBINFO_UNLOCK(ti);
			/*
			 * Now we can send the shootdown IPI to a CPU
			 * that shares this TLB and is currently using
			 * this pmap.  That CPU will process the IPI and
			 * do all the work.  Any other CPUs sharing that
			 * TLB will take advantage of that work.
			 * pm_onproc might change now that we have
			 * released the lock but we can tolerate spurious
			 * shootdowns.
			 */
			cpu_send_ipi(cpu_lookup(j), IPI_SHOOTDOWN);
			ipi_sent = true;
			continue;
		}
		if (!pmap_tlb_intersecting_active_p(pm, ti)) {
			/*
			 * If this pmap has an ASID assigned but it's not
			 * currently running, nuke its ASID.  Next time the
			 * pmap is activated, it will allocate a new ASID.
			 * And best of all, we avoid an IPI.
			 */
			KASSERT(!kernel_p);
			pmap_tlb_pai_reset(ti, pai, pm);
			//ti->ti_evcnt_lazy_shots.ev_count++;
		}
		TLBINFO_UNLOCK(ti);
	}

	UVMHIST_LOG(maphist, " <-- done (ipi_sent=%jd)", ipi_sent, 0, 0, 0);

	return ipi_sent;
}
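
/*
 * A hedged sketch of the caller side (assumed, not defined in this file;
 * the actual call site lives in the machine-independent pmap code).  A
 * pmap operation that deferred an IPI via pm_shootdown_pending would,
 * with preemption disabled, finish up roughly like this:
 *
 *	kpreempt_disable();
 *	if (pm->pm_shootdown_pending) {
 *		pm->pm_shootdown_pending = 0;
 *		(void)pmap_tlb_shootdown_bystanders(pm);
 *	}
 *	kpreempt_enable();
 */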
#endif /* MULTIPROCESSOR && PMAP_TLB_NEED_SHOOTDOWN */

int
pmap_tlb_update_addr(pmap_t pm, vaddr_t va, pt_entry_t pte, u_int flags)
{
	struct pmap_tlb_info * const ti = cpu_tlb_info(curcpu());
	struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);
	int rv = -1;

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(maphist, " (pm=%#jx va=%#jx, pte=%#jx flags=%#jx)",
	    (uintptr_t)pm, va, pte_value(pte), flags);

	KASSERT(kpreempt_disabled());

	KASSERTMSG(pte_valid_p(pte), "va %#"PRIxVADDR" %#"PRIxPTE,
	    va, pte_value(pte));

	TLBINFO_LOCK(ti);
	if (pm == pmap_kernel() || PMAP_PAI_ASIDVALID_P(pai, ti)) {
		pmap_tlb_asid_check();
		rv = tlb_update_addr(va, pai->pai_asid, pte,
		    (flags & PMAP_TLB_INSERT) != 0);
		pmap_tlb_asid_check();
		UVMHIST_LOG(maphist,
		    "   %jd <-- tlb_update_addr(%#jx, %#jx, %#jx, ...)",
		    rv, va, pai->pai_asid, pte_value(pte));
		KASSERTMSG((flags & PMAP_TLB_INSERT) == 0 || rv == 1,
		    "pmap %p (asid %u) va %#"PRIxVADDR" pte %#"PRIxPTE" rv %d",
		    pm, pai->pai_asid, va, pte_value(pte), rv);
	}
#if defined(MULTIPROCESSOR) && defined(PMAP_TLB_NEED_SHOOTDOWN)
	if (flags & PMAP_TLB_NEED_IPI)
		pm->pm_shootdown_pending = 1;
#endif
	TLBINFO_UNLOCK(ti);

	UVMHIST_LOG(maphist, "   <-- done (rv=%jd)", rv, 0, 0, 0);

	return rv;
}

void
pmap_tlb_invalidate_addr(pmap_t pm, vaddr_t va)
{
	struct pmap_tlb_info * const ti = cpu_tlb_info(curcpu());
	struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(maphist, " (pm=%#jx va=%#jx) ti=%#jx asid=%#jx",
	    (uintptr_t)pm, va, (uintptr_t)ti, pai->pai_asid);

	KASSERT(kpreempt_disabled());

	TLBINFO_LOCK(ti);
	if (pm == pmap_kernel() || PMAP_PAI_ASIDVALID_P(pai, ti)) {
		pmap_tlb_asid_check();
		UVMHIST_LOG(maphist, " invalidating %#jx asid %#jx",
		    va, pai->pai_asid, 0, 0);
		tlb_invalidate_addr(va, pai->pai_asid);
		pmap_tlb_asid_check();
	}
#if defined(MULTIPROCESSOR) && defined(PMAP_TLB_NEED_SHOOTDOWN)
	pm->pm_shootdown_pending = 1;
#endif
	TLBINFO_UNLOCK(ti);
	UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0);
}

static inline void
pmap_tlb_asid_alloc(struct pmap_tlb_info *ti, pmap_t pm,
	struct pmap_asid_info *pai)
{
	/*
	 * We shouldn't have an ASID assigned, and thusly must not be onproc
	 * nor active.
	 */
	KASSERT(pm != pmap_kernel());
	KASSERT(pai->pai_asid == 0);
	KASSERT(pai->pai_link.le_prev == NULL);
#if defined(MULTIPROCESSOR)
	KASSERT(!pmap_tlb_intersecting_onproc_p(pm, ti));
	KASSERT(!pmap_tlb_intersecting_active_p(pm, ti));
#endif
	KASSERT(ti->ti_asids_free > 0);
	KASSERT(ti->ti_asid_hint > KERNEL_PID);

	/*
	 * If the last ASID allocated was the maximum ASID, then the
	 * hint will be out of range.  Reset the hint to the first
	 * available ASID.
	 */
	if (PMAP_TLB_FLUSH_ASID_ON_RESET
	    && ti->ti_asid_hint > ti->ti_asid_max) {
		ti->ti_asid_hint = KERNEL_PID + 1;
	}
	KASSERTMSG(ti->ti_asid_hint <= ti->ti_asid_max, "hint %u",
	    ti->ti_asid_hint);

	/*
	 * Let's see if the hinted ASID is free.  If not, search for
	 * a new one.
	 */
	if (__predict_true(TLBINFO_ASID_INUSE_P(ti, ti->ti_asid_hint))) {
		const size_t nbpw = NBBY * sizeof(ti->ti_asid_bitmap._b[0]);
		size_t i;
		u_long bits;
		for (i = 0; (bits = ~ti->ti_asid_bitmap._b[i]) == 0; i++) {
			KASSERT(i < __arraycount(ti->ti_asid_bitmap._b) - 1);
		}
		/*
		 * ffs wants to find the first bit set while we want
		 * to find the first bit cleared.
		 */
		const u_int n = __builtin_ffsl(bits) - 1;
		KASSERTMSG((bits << (nbpw - (n+1))) == (1ul << (nbpw-1)),
		    "n %u bits %#lx", n, bits);
		KASSERT(n < nbpw);
		ti->ti_asid_hint = n + i * nbpw;
	}

	KASSERT(ti->ti_asid_hint > KERNEL_PID);
	KASSERT(ti->ti_asid_hint <= ti->ti_asid_max);
	KASSERTMSG(PMAP_TLB_FLUSH_ASID_ON_RESET
	    || TLBINFO_ASID_INUSE_P(ti, ti->ti_asid_hint - 1),
	    "hint %u bitmap %p", ti->ti_asid_hint, &ti->ti_asid_bitmap);
	KASSERTMSG(!TLBINFO_ASID_INUSE_P(ti, ti->ti_asid_hint),
	    "hint %u bitmap %p", ti->ti_asid_hint, &ti->ti_asid_bitmap);

	/*
	 * The hint contains our next ASID so take it and advance the hint.
	 * Mark it as used and insert the pai into the list of active asids.
	 * There is also one less asid free in this TLB.
	 */
	KASSERT(ti->ti_asid_hint > KERNEL_PID);
	pai->pai_asid = ti->ti_asid_hint++;
#ifdef MULTIPROCESSOR
	if (PMAP_TLB_FLUSH_ASID_ON_RESET) {
		/*
		 * Clean the new ASID from the TLB.
		 */
		tlb_invalidate_asids(pai->pai_asid, pai->pai_asid);
	}
#endif
	TLBINFO_ASID_MARK_USED(ti, pai->pai_asid);
	LIST_INSERT_HEAD(&ti->ti_pais, pai, pai_link);
	ti->ti_asids_free--;

#if defined(MULTIPROCESSOR)
	/*
	 * Mark that we now have an active ASID for all CPUs sharing this TLB.
	 * The bits in pm_active belonging to this TLB can only be changed
	 * while this TLB's lock is held.
	 */
#if PMAP_TLB_MAX == 1
	kcpuset_copy(pm->pm_active, kcpuset_running);
#else
	kcpuset_merge(pm->pm_active, ti->ti_kcpuset);
#endif
#endif
}
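
/*
 * A small illustration of the first-clear-bit scan above, with
 * hypothetical values and 64-bit bitmap words: if ti_asid_bitmap._b[0] is
 * 0x1ff (ASIDs 0-8 in use) and the hint points at a used ASID, then
 * bits = ~_b[0] has its lowest set bit at position 9, __builtin_ffsl(bits)
 * returns 10, n becomes 9, and the new hint (the next ASID handed out) is
 * 9 + 0 * nbpw == 9.
 */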

/*
 * Acquire a TLB address space tag (called an ASID or TLBPID) for the pmap
 * and, if the LWP is running on this CPU, make it the current ASID.  The
 * pmap may already have an ASID from an earlier acquisition.
 */
void
pmap_tlb_asid_acquire(pmap_t pm, struct lwp *l)
{
	struct cpu_info * const ci = l->l_cpu;
	struct pmap_tlb_info * const ti = cpu_tlb_info(ci);
	struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(maphist, "(pm=%#jx, l=%#jx, ti=%#jx)", (uintptr_t)pm,
	    (uintptr_t)l, (uintptr_t)ti, 0);

	KASSERT(kpreempt_disabled());

	/*
	 * The kernel uses a fixed ASID and thus doesn't need to acquire one.
	 */
	if (pm == pmap_kernel()) {
		UVMHIST_LOG(maphist, " <-- done (kernel)", 0, 0, 0, 0);
		return;
	}

	TLBINFO_LOCK(ti);
	KASSERT(pai->pai_asid <= KERNEL_PID || pai->pai_link.le_prev != NULL);
	KASSERT(pai->pai_asid > KERNEL_PID || pai->pai_link.le_prev == NULL);
	pmap_tlb_pai_check(ti, true);
	if (__predict_false(!PMAP_PAI_ASIDVALID_P(pai, ti))) {
		/*
		 * If we've run out of ASIDs, reinitialize the ASID space.
		 */
		if (__predict_false(tlbinfo_noasids_p(ti))) {
			KASSERT(l == curlwp);
			UVMHIST_LOG(maphist, " asid reinit", 0, 0, 0, 0);
			pmap_tlb_asid_reinitialize(ti, TLBINV_NOBODY);
			KASSERT(!tlbinfo_noasids_p(ti));
		}

		/*
		 * Get an ASID.
		 */
		pmap_tlb_asid_alloc(ti, pm, pai);
		UVMHIST_LOG(maphist, "allocated asid %#jx", pai->pai_asid,
		    0, 0, 0);
	}
	pmap_tlb_pai_check(ti, true);
#if defined(MULTIPROCESSOR)
	KASSERT(kcpuset_isset(pm->pm_active, cpu_index(ci)));
#endif

	if (l == curlwp) {
#if defined(MULTIPROCESSOR)
		/*
		 * The bits in pm_onproc belonging to this TLB can only
		 * be changed while this TLB's lock is held unless atomic
		 * operations are used.
		 */
		KASSERT(pm != pmap_kernel());
		kcpuset_atomic_set(pm->pm_onproc, cpu_index(ci));
#endif
		ci->ci_pmap_asid_cur = pai->pai_asid;
		UVMHIST_LOG(maphist, "setting asid to %#jx", pai->pai_asid,
		    0, 0, 0);
		tlb_set_asid(pai->pai_asid);
		pmap_tlb_asid_check();
	} else {
		printf("%s: l (%p) != curlwp %p\n", __func__, l, curlwp);
	}
	TLBINFO_UNLOCK(ti);
	UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0);
}

void
pmap_tlb_asid_deactivate(pmap_t pm)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(maphist, "pm %#jx", (uintptr_t)pm, 0, 0, 0);

	KASSERT(kpreempt_disabled());
#if defined(MULTIPROCESSOR)
	/*
	 * The kernel pmap is always onproc and active and must never have
	 * those bits cleared.  If pmap_remove_all was called, it has already
	 * deactivated the pmap and thusly onproc will be 0 so there's nothing
	 * to do.
	 */
	if (pm != pmap_kernel() && !kcpuset_iszero(pm->pm_onproc)) {
		struct cpu_info * const ci = curcpu();
		KASSERT(!cpu_intr_p());
		KASSERTMSG(kcpuset_isset(pm->pm_onproc, cpu_index(ci)),
		    "%s: pmap %p onproc %p doesn't include cpu %d (%p)",
		    __func__, pm, pm->pm_onproc, cpu_index(ci), ci);
		/*
		 * The bits in pm_onproc that belong to this TLB can
		 * be changed while this TLB's lock is not held as long
		 * as we use atomic ops.
		 */
		kcpuset_atomic_clear(pm->pm_onproc, cpu_index(ci));
	}
#endif
	curcpu()->ci_pmap_asid_cur = KERNEL_PID;
	tlb_set_asid(KERNEL_PID);

	pmap_tlb_pai_check(cpu_tlb_info(curcpu()), false);
#if defined(DEBUG)
	pmap_tlb_asid_check();
#endif
	UVMHIST_LOG(maphist, " <-- done (pm=%#jx)", (uintptr_t)pm, 0, 0, 0);
}

void
pmap_tlb_asid_release_all(struct pmap *pm)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(maphist, "(pm=%#jx)", (uintptr_t)pm, 0, 0, 0);

	KASSERT(pm != pmap_kernel());
#if defined(MULTIPROCESSOR)
	//KASSERT(!kcpuset_iszero(pm->pm_onproc)); // XXX
	struct cpu_info * const ci __diagused = curcpu();
	KASSERT(!kcpuset_isotherset(pm->pm_onproc, cpu_index(ci)));
#if PMAP_TLB_MAX > 1
	for (u_int i = 0; !kcpuset_iszero(pm->pm_active); i++) {
		KASSERT(i < pmap_ntlbs);
		struct pmap_tlb_info * const ti = pmap_tlbs[i];
#else
		struct pmap_tlb_info * const ti = &pmap_tlb0_info;
#endif
		struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);
		TLBINFO_LOCK(ti);
		if (PMAP_PAI_ASIDVALID_P(pai, ti)) {
			/*
			 * This pmap should not be in use by any other cpu so
			 * we can just reset and be happy.
			 */
			if (ti->ti_victim == pm)
				ti->ti_victim = NULL;
			pmap_tlb_pai_reset(ti, pai, pm);
		}
		KASSERT(pai->pai_link.le_prev == NULL);
		TLBINFO_UNLOCK(ti);
#if PMAP_TLB_MAX > 1
	}
#endif
#ifdef DIAGNOSTIC
	for (size_t i = 0; i < (PMAP_TLB_MAX > 1 ? pmap_ntlbs : 1); i++) {
		KASSERTMSG(pm->pm_pai[i].pai_asid == 0,
		    "pm %p i %zu asid %u",
		    pm, i, pm->pm_pai[i].pai_asid);
	}
#endif
#else
	/*
	 * Handle the case of a UP kernel which only has, at most, one TLB.
	 * If the pmap has an ASID allocated, free it.
	 */
	struct pmap_tlb_info * const ti = &pmap_tlb0_info;
	struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);
	TLBINFO_LOCK(ti);
	if (pai->pai_asid > KERNEL_PID) {
		if (curcpu()->ci_pmap_asid_cur == pai->pai_asid) {
			tlb_invalidate_asids(pai->pai_asid, pai->pai_asid);
		} else {
			pmap_tlb_pai_reset(ti, pai, pm);
		}
	}
	TLBINFO_UNLOCK(ti);
#endif /* MULTIPROCESSOR */
	UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0);
}

void
pmap_tlb_asid_check(void)
{
#ifdef DEBUG
	kpreempt_disable();
	const tlb_asid_t asid __debugused = tlb_get_asid();
	KDASSERTMSG(asid == curcpu()->ci_pmap_asid_cur,
	    "%s: asid (%#x) != current asid (%#x)",
	    __func__, asid, curcpu()->ci_pmap_asid_cur);
	kpreempt_enable();
#endif
}

#ifdef DEBUG
void
pmap_tlb_check(pmap_t pm, bool (*func)(void *, vaddr_t, tlb_asid_t, pt_entry_t))
{
	struct pmap_tlb_info * const ti = cpu_tlb_info(curcpu());
	struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);
	TLBINFO_LOCK(ti);
	if (pm == pmap_kernel() || pai->pai_asid > KERNEL_PID)
		tlb_walk(pm, func);
	TLBINFO_UNLOCK(ti);
}
#endif /* DEBUG */