1*fbbd98edSskrll /* $NetBSD: pmap_tlb.c,v 1.62 2024/01/01 16:56:30 skrll Exp $ */
2b1425120Schristos
3b1425120Schristos /*-
4b1425120Schristos * Copyright (c) 2010 The NetBSD Foundation, Inc.
5b1425120Schristos * All rights reserved.
6b1425120Schristos *
7b1425120Schristos * This code is derived from software contributed to The NetBSD Foundation
8b1425120Schristos * by Matt Thomas at 3am Software Foundry.
9b1425120Schristos *
10b1425120Schristos * Redistribution and use in source and binary forms, with or without
11b1425120Schristos * modification, are permitted provided that the following conditions
12b1425120Schristos * are met:
13b1425120Schristos * 1. Redistributions of source code must retain the above copyright
14b1425120Schristos * notice, this list of conditions and the following disclaimer.
15b1425120Schristos * 2. Redistributions in binary form must reproduce the above copyright
16b1425120Schristos * notice, this list of conditions and the following disclaimer in the
17b1425120Schristos * documentation and/or other materials provided with the distribution.
18b1425120Schristos *
19b1425120Schristos * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20b1425120Schristos * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21b1425120Schristos * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22b1425120Schristos * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23b1425120Schristos * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24b1425120Schristos * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25b1425120Schristos * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26b1425120Schristos * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27b1425120Schristos * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28b1425120Schristos * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29b1425120Schristos * POSSIBILITY OF SUCH DAMAGE.
30b1425120Schristos */
31b1425120Schristos
32b1425120Schristos #include <sys/cdefs.h>
33b1425120Schristos
34*fbbd98edSskrll __KERNEL_RCSID(0, "$NetBSD: pmap_tlb.c,v 1.62 2024/01/01 16:56:30 skrll Exp $");
35b1425120Schristos
36b1425120Schristos /*
37b1425120Schristos * Manages address spaces in a TLB.
38b1425120Schristos *
39b1425120Schristos * Normally there is a 1:1 mapping between a TLB and a CPU. However, some
40b1425120Schristos * implementations may share a TLB between multiple CPUs (really CPU thread
41b1425120Schristos * contexts). This requires the TLB abstraction to be separated from the
42b1425120Schristos * CPU abstraction. It also requires that the TLB be locked while doing
43b1425120Schristos * TLB activities.
44b1425120Schristos *
45b1425120Schristos * For each TLB, we track the ASIDs in use in a bitmap and a list of pmaps
46b1425120Schristos * that have a valid ASID.
47b1425120Schristos *
48b1425120Schristos * We allocate ASIDs in increasing order until we have exhausted the supply,
49b1425120Schristos * then reinitialize the ASID space, and start allocating again at 1. When
50b1425120Schristos * allocating from the ASID bitmap, we skip any ASID that has a corresponding
51b1425120Schristos * bit set in the ASID bitmap. Eventually this causes the ASID bitmap to fill
52b1425120Schristos * and, when completely filled, a reinitialization of the ASID space.
53b1425120Schristos *
54b1425120Schristos * To reinitialize the ASID space, the ASID bitmap is reset and then the ASIDs
55b1425120Schristos * of non-kernel TLB entries get recorded in the ASID bitmap. If the entries
56b1425120Schristos * in TLB consume more than half of the ASID space, all ASIDs are invalidated,
57b1425120Schristos * the ASID bitmap is recleared, and the list of pmaps is emptied. Otherwise,
58b1425120Schristos * (the normal case), any ASID present in the TLB (even those which are no
59b1425120Schristos * longer used by a pmap) will remain active (allocated) and all other ASIDs
60b1425120Schristos * will be freed. If the size of the TLB is much smaller than the ASID space,
61b1425120Schristos * this algorithm completely avoids TLB invalidation.
62b1425120Schristos *
63b1425120Schristos * For multiprocessors, we also have to deal with TLB invalidation requests from
64b1425120Schristos * other CPUs, some of which are dealt with the reinitialization of the ASID
65b1425120Schristos * space. Whereas above we keep the ASIDs of those pmaps which have active
66b1425120Schristos * TLB entries, this type of reinitialization preserves the ASIDs of any
67b1425120Schristos * "onproc" user pmap and all other ASIDs will be freed. We must do this
68b1425120Schristos * since we can't change the current ASID.
69b1425120Schristos *
70b1425120Schristos * Each pmap has two bitmaps: pm_active and pm_onproc. Each bit in pm_active
71b1425120Schristos * indicates whether that pmap has an allocated ASID for a CPU. Each bit in
724aca4be1Sskrll * pm_onproc indicates that the pmap's ASID is in use, i.e. a CPU has it in its
734aca4be1Sskrll * "current ASID" field, e.g. the ASID field of the COP 0 register EntryHi for
744aca4be1Sskrll * MIPS, or the ASID field of TTBR0 for AA64. The bit number used in these
754aca4be1Sskrll * bitmaps comes from the CPU's cpu_index(). Even though these bitmaps contain
764aca4be1Sskrll * the bits for all CPUs, the bits that correspond to the bits belonging to
774aca4be1Sskrll * the CPUs sharing a TLB can only be manipulated while holding that TLB's
784aca4be1Sskrll * lock. Atomic ops must be used to update them since multiple CPUs may be
794aca4be1Sskrll * changing different sets of bits at same time but these sets never overlap.
80b1425120Schristos *
81b1425120Schristos * When a change to the local TLB may require a change in the TLB's of other
82b1425120Schristos * CPUs, we try to avoid sending an IPI if at all possible. For instance, if
83b1425120Schristos * we are updating a PTE and that PTE previously was invalid and therefore
84b1425120Schristos * couldn't support an active mapping, there's no need for an IPI since there
85b1425120Schristos * can't be a TLB entry to invalidate. The other case is when we change a PTE
86b1425120Schristos * to be modified we just update the local TLB. If another TLB has a stale
87b1425120Schristos * entry, a TLB MOD exception will be raised and that will cause the local TLB
88b1425120Schristos * to be updated.
89b1425120Schristos *
90b1425120Schristos * We never need to update a non-local TLB if the pmap doesn't have a valid
91b1425120Schristos * ASID for that TLB. If it does have a valid ASID but isn't current "onproc"
92b1425120Schristos * we simply reset its ASID for that TLB and then when it goes "onproc" it
93b1425120Schristos * will allocate a new ASID and any existing TLB entries will be orphaned.
94b1425120Schristos * Only in the case that pmap has an "onproc" ASID do we actually have to send
95b1425120Schristos * an IPI.
96b1425120Schristos *
97b1425120Schristos * Once we determined we must send an IPI to shootdown a TLB, we need to send
98b1425120Schristos * it to one of CPUs that share that TLB. We choose the lowest numbered CPU
99b1425120Schristos * that has one of the pmap's ASID "onproc". In reality, any CPU sharing that
100b1425120Schristos * TLB would do, but interrupting an active CPU seems best.
101b1425120Schristos *
102b1425120Schristos * A TLB might have multiple shootdowns active concurrently. The shootdown
103b1425120Schristos * logic compresses these into a few cases:
104b1425120Schristos * 0) nobody needs to have its TLB entries invalidated
105b1425120Schristos * 1) one ASID needs to have its TLB entries invalidated
106b1425120Schristos * 2) more than one ASID needs to have its TLB entries invalidated
107b1425120Schristos * 3) the kernel needs to have its TLB entries invalidated
108b1425120Schristos * 4) the kernel and one or more ASID need their TLB entries invalidated.
109b1425120Schristos *
110b1425120Schristos * And for each case we do:
111b1425120Schristos * 0) nothing,
112b1425120Schristos * 1) if that ASID is still "onproc", we invalidate the TLB entries for
113b1425120Schristos * that single ASID. If not, just reset the pmap's ASID to invalidate
114b1425120Schristos * and let it allocate a new ASID the next time it goes "onproc",
115b1425120Schristos * 2) we reinitialize the ASID space (preserving any "onproc" ASIDs) and
116b1425120Schristos * invalidate all non-wired non-global TLB entries,
117b1425120Schristos * 3) we invalidate all of the non-wired global TLB entries,
118b1425120Schristos * 4) we reinitialize the ASID space (again preserving any "onproc" ASIDs)
119b1425120Schristos * invalidate all non-wired TLB entries.
120b1425120Schristos *
121b1425120Schristos * As you can see, shootdowns are not concerned with addresses, just address
122b1425120Schristos * spaces. Since the number of TLB entries is usually quite small, this avoids
123b1425120Schristos * a lot of overhead for not much gain.
124b1425120Schristos */
125b1425120Schristos
126b1425120Schristos #define __PMAP_PRIVATE
127b1425120Schristos
128b1425120Schristos #include "opt_multiprocessor.h"
129b1425120Schristos
130b1425120Schristos #include <sys/param.h>
13139914130Sskrll
132b1425120Schristos #include <sys/atomic.h>
133b1425120Schristos #include <sys/cpu.h>
13439914130Sskrll #include <sys/kernel.h> /* for cold */
13539914130Sskrll #include <sys/mutex.h>
13639914130Sskrll #include <sys/proc.h>
13739914130Sskrll #include <sys/systm.h>
138b1425120Schristos
139b1425120Schristos #include <uvm/uvm.h>
140b1425120Schristos
1411627c0aeSmatt static kmutex_t pmap_tlb0_lock __cacheline_aligned;
142b1425120Schristos
143b1425120Schristos #define IFCONSTANT(x) (__builtin_constant_p((x)) ? (x) : 0)
144b1425120Schristos
145f90211bbSjdolecek #if KERNEL_PID > 31
146f90211bbSjdolecek #error "KERNEL_PID expected in range 0-31"
147f90211bbSjdolecek #endif
148f90211bbSjdolecek
/*
 * Accessors for the per-TLB ASID bitmap (ti_asid_bitmap): mark an ASID
 * free/used and test whether it is in use.  TLBINFO_ASID_RESET() clears
 * the whole bitmap and then re-marks ASIDs 0..KERNEL_PID as used, since
 * those are permanently reserved for the kernel.
 * TLBINFO_ASID_INITIAL_FREE() is the number of allocatable ASIDs after
 * such a reset: the full range minus ASID 0 and the kernel-reserved IDs.
 */
149f82a6142Sjdolecek #define TLBINFO_ASID_MARK_UNUSED(ti, asid) \
150f82a6142Sjdolecek __BITMAP_CLR((asid), &(ti)->ti_asid_bitmap)
151f82a6142Sjdolecek #define TLBINFO_ASID_MARK_USED(ti, asid) \
152f82a6142Sjdolecek __BITMAP_SET((asid), &(ti)->ti_asid_bitmap)
153f82a6142Sjdolecek #define TLBINFO_ASID_INUSE_P(ti, asid) \
154f82a6142Sjdolecek __BITMAP_ISSET((asid), &(ti)->ti_asid_bitmap)
155f82a6142Sjdolecek #define TLBINFO_ASID_RESET(ti) \
156f82a6142Sjdolecek do { \
157f82a6142Sjdolecek __BITMAP_ZERO(&ti->ti_asid_bitmap); \
158f82a6142Sjdolecek for (tlb_asid_t asid = 0; asid <= KERNEL_PID; asid++) \
159f82a6142Sjdolecek TLBINFO_ASID_MARK_USED(ti, asid); \
160f82a6142Sjdolecek } while (0)
161f82a6142Sjdolecek #define TLBINFO_ASID_INITIAL_FREE(asid_max) \
162f82a6142Sjdolecek (asid_max + 1 /* 0 */ - (1 + KERNEL_PID))
163f82a6142Sjdolecek
/*
 * Statically-initialized info structure for the primary (boot) TLB.
 * Fields that depend on compile-time knowledge (PMAP_TLB_NUM_PIDS,
 * PMAP_TLB_WIRED_UPAGES) are filled in here via IFCONSTANT(); anything
 * left zero is completed at run time by pmap_tlb_info_init().  The ASID
 * bitmap starts with bits 0..KERNEL_PID set, i.e. kernel ASIDs reserved.
 */
164b1425120Schristos struct pmap_tlb_info pmap_tlb0_info = {
165b1425120Schristos .ti_name = "tlb0",
166b1425120Schristos .ti_asid_hint = KERNEL_PID + 1,
167b1425120Schristos #ifdef PMAP_TLB_NUM_PIDS
168b1425120Schristos .ti_asid_max = IFCONSTANT(PMAP_TLB_NUM_PIDS - 1),
169f82a6142Sjdolecek .ti_asids_free = IFCONSTANT(
170f82a6142Sjdolecek TLBINFO_ASID_INITIAL_FREE(PMAP_TLB_NUM_PIDS - 1)),
171b1425120Schristos #endif
172f82a6142Sjdolecek .ti_asid_bitmap._b[0] = __BITS(0, KERNEL_PID),
173b1425120Schristos #ifdef PMAP_TLB_WIRED_UPAGES
174b1425120Schristos .ti_wired = PMAP_TLB_WIRED_UPAGES,
175b1425120Schristos #endif
1761627c0aeSmatt .ti_lock = &pmap_tlb0_lock,
177b1425120Schristos .ti_pais = LIST_HEAD_INITIALIZER(pmap_tlb0_info.ti_pais),
178fe1b443aSmatt #if defined(MULTIPROCESSOR) && PMAP_TLB_MAX > 1
179b1425120Schristos .ti_tlbinvop = TLBINV_NOBODY,
180b1425120Schristos #endif
181b1425120Schristos };
182b1425120Schristos
183b1425120Schristos #undef IFCONSTANT
184b1425120Schristos
/*
 * Table of all TLB info structures (only needed when more than one TLB
 * can exist).  Slot 0 is always the boot TLB; secondary entries are
 * installed by pmap_tlb_info_init().  pmap_ntlbs counts the live slots.
 */
185fe1b443aSmatt #if defined(MULTIPROCESSOR) && PMAP_TLB_MAX > 1
186fe1b443aSmatt struct pmap_tlb_info *pmap_tlbs[PMAP_TLB_MAX] = {
187b1425120Schristos [0] = &pmap_tlb0_info,
188b1425120Schristos };
189b1425120Schristos u_int pmap_ntlbs = 1;
190b1425120Schristos #endif
191b1425120Schristos
192fe1b443aSmatt #ifdef MULTIPROCESSOR
/*
 * True iff pm has its pm_active bit set for some CPU that shares this
 * TLB, i.e. the pmap holds an allocated ASID on ti.  With a single TLB
 * any active bit qualifies; otherwise intersect with ti's CPU set.
 */
1937c4334f7Sjoerg __unused static inline bool
pmap_tlb_intersecting_active_p(pmap_t pm,struct pmap_tlb_info * ti)194fe1b443aSmatt pmap_tlb_intersecting_active_p(pmap_t pm, struct pmap_tlb_info *ti)
195fe1b443aSmatt {
196fe1b443aSmatt #if PMAP_TLB_MAX == 1
197fe1b443aSmatt return !kcpuset_iszero(pm->pm_active);
198fe1b443aSmatt #else
199fe1b443aSmatt return kcpuset_intersecting_p(pm->pm_active, ti->ti_kcpuset);
200fe1b443aSmatt #endif
201fe1b443aSmatt }
202fe1b443aSmatt
/*
 * True iff pm has its pm_onproc bit set for some CPU that shares this
 * TLB, i.e. the pmap's ASID is the current ASID of one of ti's CPUs.
 * Mirrors pmap_tlb_intersecting_active_p() but tests pm_onproc.
 */
203fe1b443aSmatt static inline bool
pmap_tlb_intersecting_onproc_p(pmap_t pm,struct pmap_tlb_info * ti)204fe1b443aSmatt pmap_tlb_intersecting_onproc_p(pmap_t pm, struct pmap_tlb_info *ti)
205fe1b443aSmatt {
206fe1b443aSmatt #if PMAP_TLB_MAX == 1
207fe1b443aSmatt return !kcpuset_iszero(pm->pm_onproc);
208fe1b443aSmatt #else
209fe1b443aSmatt return kcpuset_intersecting_p(pm->pm_onproc, ti->ti_kcpuset);
210fe1b443aSmatt #endif
211fe1b443aSmatt }
212fe1b443aSmatt #endif
213fe1b443aSmatt
/*
 * Sanity-check every pmap_asid_info on ti's list (DIAGNOSTIC kernels
 * only; otherwise this is just UVMHIST tracing).  Each entry must be a
 * non-kernel pmap whose ASID lies in (KERNEL_PID, ti_asid_max] and is
 * marked in-use in the ASID bitmap.  If locked_p is false the TLB lock
 * is taken around the walk; pass true when the caller already holds it.
 */
2145528d7fdSmatt static void
pmap_tlb_pai_check(struct pmap_tlb_info * ti,bool locked_p)215fbaba5f3Smatt pmap_tlb_pai_check(struct pmap_tlb_info *ti, bool locked_p)
2165528d7fdSmatt {
217a9bc40deSskrll UVMHIST_FUNC(__func__);
218221a8972Sskrll UVMHIST_CALLARGS(maphist, "(ti=%#jx)", (uintptr_t)ti, 0, 0, 0);
219a9bc40deSskrll
2205528d7fdSmatt #ifdef DIAGNOSTIC
2215528d7fdSmatt struct pmap_asid_info *pai;
222fbaba5f3Smatt if (!locked_p)
223fbaba5f3Smatt TLBINFO_LOCK(ti);
2245528d7fdSmatt LIST_FOREACH(pai, &ti->ti_pais, pai_link) {
2255528d7fdSmatt KASSERT(pai != NULL);
2265528d7fdSmatt KASSERT(PAI_PMAP(pai, ti) != pmap_kernel());
2275528d7fdSmatt KASSERT(pai->pai_asid > KERNEL_PID);
2285528d7fdSmatt KASSERTMSG(pai->pai_asid <= ti->ti_asid_max,
2295528d7fdSmatt "pm %p asid %#x", PAI_PMAP(pai, ti), pai->pai_asid);
2305528d7fdSmatt KASSERTMSG(TLBINFO_ASID_INUSE_P(ti, pai->pai_asid),
2315528d7fdSmatt "pm %p asid %u", PAI_PMAP(pai, ti), pai->pai_asid);
2325528d7fdSmatt #ifdef MULTIPROCESSOR
2335528d7fdSmatt KASSERT(pmap_tlb_intersecting_active_p(PAI_PMAP(pai, ti), ti));
2345528d7fdSmatt #endif
2355528d7fdSmatt }
236fbaba5f3Smatt if (!locked_p)
237fbaba5f3Smatt TLBINFO_UNLOCK(ti);
2385528d7fdSmatt #endif
239221a8972Sskrll UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0);
2405528d7fdSmatt }
2415528d7fdSmatt
/*
 * Revoke pm's ASID on this TLB: unlink pai from ti's list of pmaps
 * holding live ASIDs, zero pai_asid so the pmap must allocate a fresh
 * ASID next time it goes "onproc", and clear pm's pm_active bits for
 * the CPUs sharing ti.  If the platform flushes ASIDs on reset
 * (PMAP_TLB_FLUSH_ASID_ON_RESET) the ASID is also returned to the free
 * pool; otherwise it stays marked in-use until the ASID space is
 * reinitialized, letting stale TLB entries age out naturally.
 *
 * Caller must hold ti's lock (pm_active is only mutated under it, per
 * the comment below); the pmap must not be onproc on any of ti's CPUs.
 */
2425528d7fdSmatt static void
pmap_tlb_pai_reset(struct pmap_tlb_info * ti,struct pmap_asid_info * pai,struct pmap * pm)2435528d7fdSmatt pmap_tlb_pai_reset(struct pmap_tlb_info *ti, struct pmap_asid_info *pai,
244b1425120Schristos struct pmap *pm)
245b1425120Schristos {
246e4535b97Sskrll UVMHIST_FUNC(__func__);
247e4535b97Sskrll UVMHIST_CALLARGS(maphist, "(ti=%#jx, pai=%#jx, pm=%#jx): asid %u",
24876cb9a05Sskrll (uintptr_t)ti, (uintptr_t)pai, (uintptr_t)pm, pai->pai_asid);
2495528d7fdSmatt
250b1425120Schristos /*
251b1425120Schristos * We must have an ASID but it must not be onproc (on a processor).
252b1425120Schristos */
253b1425120Schristos KASSERT(pai->pai_asid > KERNEL_PID);
2545528d7fdSmatt KASSERT(pai->pai_asid <= ti->ti_asid_max);
255b1425120Schristos #if defined(MULTIPROCESSOR)
2565528d7fdSmatt KASSERT(pmap_tlb_intersecting_active_p(pm, ti));
257fe1b443aSmatt KASSERT(!pmap_tlb_intersecting_onproc_p(pm, ti));
258b1425120Schristos #endif
259b1425120Schristos LIST_REMOVE(pai, pai_link);
260b1425120Schristos #ifdef DIAGNOSTIC
261b1425120Schristos pai->pai_link.le_prev = NULL; /* tagged as unlinked */
262b1425120Schristos #endif
263b1425120Schristos /*
2641627c0aeSmatt * If the platform has a cheap way to flush ASIDs then free the ASID
2651627c0aeSmatt * back into the pool. On multiprocessor systems, we will flush the
2661627c0aeSmatt * ASID from the TLB when it's allocated. That way we know the flush
2671627c0aeSmatt * was always done in the correct TLB space. On uniprocessor systems,
2681627c0aeSmatt * just do the flush now since we know that it has been used. This has
2691627c0aeSmatt * a bit less overhead. Either way, this will mean that we will only
2701627c0aeSmatt * need to flush all ASIDs if all ASIDs are in use and we need to
2711627c0aeSmatt * allocate a new one.
2721627c0aeSmatt */
2731627c0aeSmatt if (PMAP_TLB_FLUSH_ASID_ON_RESET) {
2741627c0aeSmatt #ifndef MULTIPROCESSOR
275a9bc40deSskrll UVMHIST_LOG(maphist, " ... asid %u flushed", pai->pai_asid, 0,
276a9bc40deSskrll 0, 0);
2771627c0aeSmatt tlb_invalidate_asids(pai->pai_asid, pai->pai_asid);
2781627c0aeSmatt #endif
2791627c0aeSmatt if (TLBINFO_ASID_INUSE_P(ti, pai->pai_asid)) {
280a9bc40deSskrll UVMHIST_LOG(maphist, " ... asid marked unused",
281a9bc40deSskrll pai->pai_asid, 0, 0, 0);
2821627c0aeSmatt TLBINFO_ASID_MARK_UNUSED(ti, pai->pai_asid);
2831627c0aeSmatt ti->ti_asids_free++;
2841627c0aeSmatt }
2851627c0aeSmatt }
2861627c0aeSmatt /*
287b1425120Schristos * Note that we don't mark the ASID as not in use in the TLB's ASID
288b1425120Schristos * bitmap (thus it can't be allocated until the ASID space is exhausted
289b1425120Schristos * and therefore reinitialized). We don't want to flush the TLB for
290b1425120Schristos * entries belonging to this ASID so we will let natural TLB entry
291b1425120Schristos * replacement flush them out of the TLB. Any new entries for this
292b1425120Schristos * pmap will need a new ASID allocated.
293b1425120Schristos */
294b1425120Schristos pai->pai_asid = 0;
295b1425120Schristos
296b1425120Schristos #if defined(MULTIPROCESSOR)
297b1425120Schristos /*
298b1425120Schristos * The bits in pm_active belonging to this TLB can only be changed
299b1425120Schristos * while this TLB's lock is held.
300b1425120Schristos */
301fe1b443aSmatt #if PMAP_TLB_MAX == 1
302fe1b443aSmatt kcpuset_zero(pm->pm_active);
303fe1b443aSmatt #else
3045528d7fdSmatt kcpuset_remove(pm->pm_active, ti->ti_kcpuset);
305fe1b443aSmatt #endif
3065528d7fdSmatt KASSERT(!pmap_tlb_intersecting_active_p(pm, ti));
307b1425120Schristos #endif /* MULTIPROCESSOR */
3085528d7fdSmatt
3095528d7fdSmatt UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0);
310b1425120Schristos }
311b1425120Schristos
/*
 * Attach event counters for this TLB under its ti_name.  The icache
 * sync counters exist only on MULTIPROCESSOR kernels that haven't
 * opted out via PMAP_TLB_NO_SYNCI_EVCNT; the "asid pool reinit"
 * counter (bumped in pmap_tlb_asid_reinitialize()) is always attached.
 */
312b1425120Schristos void
pmap_tlb_info_evcnt_attach(struct pmap_tlb_info * ti)313b1425120Schristos pmap_tlb_info_evcnt_attach(struct pmap_tlb_info *ti)
314b1425120Schristos {
3154248f17bSjdolecek #if defined(MULTIPROCESSOR) && !defined(PMAP_TLB_NO_SYNCI_EVCNT)
316b1425120Schristos evcnt_attach_dynamic_nozero(&ti->ti_evcnt_synci_desired,
317b1425120Schristos EVCNT_TYPE_MISC, NULL,
318b1425120Schristos ti->ti_name, "icache syncs desired");
319b1425120Schristos evcnt_attach_dynamic_nozero(&ti->ti_evcnt_synci_asts,
320b1425120Schristos EVCNT_TYPE_MISC, &ti->ti_evcnt_synci_desired,
321b1425120Schristos ti->ti_name, "icache sync asts");
322b1425120Schristos evcnt_attach_dynamic_nozero(&ti->ti_evcnt_synci_all,
323b1425120Schristos EVCNT_TYPE_MISC, &ti->ti_evcnt_synci_asts,
324b1425120Schristos ti->ti_name, "icache full syncs");
325b1425120Schristos evcnt_attach_dynamic_nozero(&ti->ti_evcnt_synci_pages,
326b1425120Schristos EVCNT_TYPE_MISC, &ti->ti_evcnt_synci_asts,
327b1425120Schristos ti->ti_name, "icache pages synced");
328b1425120Schristos evcnt_attach_dynamic_nozero(&ti->ti_evcnt_synci_duplicate,
329b1425120Schristos EVCNT_TYPE_MISC, &ti->ti_evcnt_synci_desired,
330b1425120Schristos ti->ti_name, "icache dup pages skipped");
331b1425120Schristos evcnt_attach_dynamic_nozero(&ti->ti_evcnt_synci_deferred,
332b1425120Schristos EVCNT_TYPE_MISC, &ti->ti_evcnt_synci_desired,
333b1425120Schristos ti->ti_name, "icache pages deferred");
3344248f17bSjdolecek #endif /* MULTIPROCESSOR && !PMAP_TLB_NO_SYNCI_EVCNT */
335b1425120Schristos evcnt_attach_dynamic_nozero(&ti->ti_evcnt_asid_reinits,
336b1425120Schristos EVCNT_TYPE_MISC, NULL,
337b1425120Schristos ti->ti_name, "asid pool reinit");
338b1425120Schristos }
339b1425120Schristos
/*
 * Finish initializing a TLB info structure.  A secondary TLB (possible
 * only with MULTIPROCESSOR and PMAP_TLB_MAX > 1) gets a freshly
 * allocated lock, an ASID space copied from tlb0, its own kcpuset and
 * a slot in pmap_tlbs[], then returns early.  The boot TLB
 * (&pmap_tlb0_info) instead has its static lock initialized and its
 * ti_asid_max clamped to the machine-dependent pmap_md_tlb_asid_max()
 * when that is smaller (or when no compile-time maximum was set).
 */
340b1425120Schristos void
pmap_tlb_info_init(struct pmap_tlb_info * ti)341b1425120Schristos pmap_tlb_info_init(struct pmap_tlb_info *ti)
342b1425120Schristos {
343b1425120Schristos #if defined(MULTIPROCESSOR)
344fe1b443aSmatt #if PMAP_TLB_MAX == 1
345fe1b443aSmatt KASSERT(ti == &pmap_tlb0_info);
346fe1b443aSmatt #else
347b1425120Schristos if (ti != &pmap_tlb0_info) {
348fe1b443aSmatt KASSERT(pmap_ntlbs < PMAP_TLB_MAX);
349b1425120Schristos
350b1425120Schristos KASSERT(pmap_tlbs[pmap_ntlbs] == NULL);
351b1425120Schristos
352b1425120Schristos ti->ti_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
353f82a6142Sjdolecek TLBINFO_ASID_RESET(ti);
354b1425120Schristos ti->ti_asid_hint = KERNEL_PID + 1;
355b1425120Schristos ti->ti_asid_max = pmap_tlbs[0]->ti_asid_max;
356f82a6142Sjdolecek ti->ti_asids_free = TLBINFO_ASID_INITIAL_FREE(ti->ti_asid_max);
3579015c01fSchristos ti->ti_tlbinvop = TLBINV_NOBODY;
358b1425120Schristos ti->ti_victim = NULL;
359fe1b443aSmatt kcpuset_create(&ti->ti_kcpuset, true);
360b1425120Schristos ti->ti_index = pmap_ntlbs++;
361b1425120Schristos ti->ti_wired = 0;
362b1425120Schristos pmap_tlbs[ti->ti_index] = ti;
363b1425120Schristos snprintf(ti->ti_name, sizeof(ti->ti_name), "tlb%u",
364b1425120Schristos ti->ti_index);
365b1425120Schristos pmap_tlb_info_evcnt_attach(ti);
366f90211bbSjdolecek
367f90211bbSjdolecek KASSERT(ti->ti_asid_max < PMAP_TLB_BITMAP_LENGTH);
368b1425120Schristos return;
369b1425120Schristos }
370fe1b443aSmatt #endif
371b1425120Schristos #endif /* MULTIPROCESSOR */
372b1425120Schristos KASSERT(ti == &pmap_tlb0_info);
3731627c0aeSmatt KASSERT(ti->ti_lock == &pmap_tlb0_lock);
374a9bc40deSskrll
375b1425120Schristos mutex_init(ti->ti_lock, MUTEX_DEFAULT, IPL_SCHED);
376fe1b443aSmatt #if defined(MULTIPROCESSOR) && PMAP_TLB_MAX > 1
377fe1b443aSmatt kcpuset_create(&ti->ti_kcpuset, true);
3781627c0aeSmatt kcpuset_set(ti->ti_kcpuset, cpu_index(curcpu()));
379fe1b443aSmatt #endif
380a9bc40deSskrll
381aa84fd75Sskrll const tlb_asid_t asid_max = pmap_md_tlb_asid_max();
382d9b4b0cdSsimonb if (ti->ti_asid_max == 0 || asid_max < ti->ti_asid_max) {
383aa84fd75Sskrll ti->ti_asid_max = asid_max;
384f82a6142Sjdolecek ti->ti_asids_free = TLBINFO_ASID_INITIAL_FREE(ti->ti_asid_max);
385b1425120Schristos }
386b1425120Schristos
387a8b51939Sskrll KASSERT(__type_fit(tlb_asid_t, ti->ti_asid_max + 1));
388f90211bbSjdolecek KASSERT(ti->ti_asid_max < PMAP_TLB_BITMAP_LENGTH);
389b1425120Schristos }
390b1425120Schristos
391b1425120Schristos #if defined(MULTIPROCESSOR)
/*
 * Attach a secondary CPU to an (already initialized) TLB info during
 * cold boot: record ci in ti's CPU set, point the CPU at ti (only
 * meaningful when multiple TLBs exist), and run any machine-dependent
 * attach hook.  The asserts pin the preconditions: never the primary
 * CPU, the idle lwp must exist, and the system must still be cold.
 */
392b1425120Schristos void
pmap_tlb_info_attach(struct pmap_tlb_info * ti,struct cpu_info * ci)393b1425120Schristos pmap_tlb_info_attach(struct pmap_tlb_info *ti, struct cpu_info *ci)
394b1425120Schristos {
395b1425120Schristos KASSERT(!CPU_IS_PRIMARY(ci));
396b1425120Schristos KASSERT(ci->ci_data.cpu_idlelwp != NULL);
397b1425120Schristos KASSERT(cold);
398b1425120Schristos
399b1425120Schristos TLBINFO_LOCK(ti);
400fe1b443aSmatt #if PMAP_TLB_MAX > 1
401fe1b443aSmatt kcpuset_set(ti->ti_kcpuset, cpu_index(ci));
40242a7dfa3Smatt cpu_set_tlb_info(ci, ti);
4031627c0aeSmatt #endif
404b1425120Schristos
405b1425120Schristos /*
406b1425120Schristos * Do any MD tlb info init.
407b1425120Schristos */
408b1425120Schristos pmap_md_tlb_info_attach(ti, ci);
409b1425120Schristos
410b1425120Schristos /*
411fe1b443aSmatt * The kernel pmap uses the kcpuset_running set so it's always
412fe1b443aSmatt * up-to-date.
413b1425120Schristos */
414b1425120Schristos TLBINFO_UNLOCK(ti);
415b1425120Schristos }
416b1425120Schristos #endif /* MULTIPROCESSOR */
417b1425120Schristos
418b1425120Schristos #ifdef DIAGNOSTIC
/*
 * DIAGNOSTIC helper: count the ASIDs marked in-use in ti's bitmap over
 * the range 1..ti_asid_max (ASID 0 is excluded).  Used to cross-check
 * ti_asids_free bookkeeping in pmap_tlb_asid_reinitialize().
 */
419b1425120Schristos static size_t
pmap_tlb_asid_count(struct pmap_tlb_info * ti)420b1425120Schristos pmap_tlb_asid_count(struct pmap_tlb_info *ti)
421b1425120Schristos {
422b1425120Schristos size_t count = 0;
423b1425120Schristos for (tlb_asid_t asid = 1; asid <= ti->ti_asid_max; asid++) {
424f0e5de02Sjdolecek if (TLBINFO_ASID_INUSE_P(ti, asid))
425f0e5de02Sjdolecek count++;
426b1425120Schristos }
427b1425120Schristos return count;
428b1425120Schristos }
429b1425120Schristos #endif
430b1425120Schristos
/*
 * Reinitialize the ASID space for this TLB (caller holds ti's lock).
 * The bitmap is reset to "only kernel ASIDs used", then TLB entries are
 * invalidated according to op: TLBINV_ALL / TLBINV_ALLUSER flush
 * everything / all user entries outright, while TLBINV_NOBODY tries to
 * keep ASIDs that still have live TLB entries (recorded via
 * tlb_record_asids()) and only flushes when more than half the ASID
 * space is in use.  Finally each pmap on ti's list either keeps its
 * ASID (onproc, or still present in the TLB) or is reset via
 * pmap_tlb_pai_reset() so it must allocate a new one.
 */
431b1425120Schristos static void
pmap_tlb_asid_reinitialize(struct pmap_tlb_info * ti,enum tlb_invalidate_op op)432b1425120Schristos pmap_tlb_asid_reinitialize(struct pmap_tlb_info *ti, enum tlb_invalidate_op op)
433b1425120Schristos {
434e4535b97Sskrll UVMHIST_FUNC(__func__);
435e4535b97Sskrll UVMHIST_CALLARGS(maphist, "(ti=%#jx, op=%ju)", (uintptr_t)ti, op, 0, 0);
4365528d7fdSmatt
437fbaba5f3Smatt pmap_tlb_pai_check(ti, true);
438b1425120Schristos
4391627c0aeSmatt ti->ti_evcnt_asid_reinits.ev_count++;
4401627c0aeSmatt
441b1425120Schristos /*
442b1425120Schristos * First, clear the ASID bitmap (except for ASID 0 which belongs
443b1425120Schristos * to the kernel).
444b1425120Schristos */
445f82a6142Sjdolecek ti->ti_asids_free = TLBINFO_ASID_INITIAL_FREE(ti->ti_asid_max);
446b1425120Schristos ti->ti_asid_hint = KERNEL_PID + 1;
447f90211bbSjdolecek TLBINFO_ASID_RESET(ti);
448b1425120Schristos
449b1425120Schristos switch (op) {
4505528d7fdSmatt #if defined(MULTIPROCESSOR) && defined(PMAP_TLB_NEED_SHOOTDOWN)
451b1425120Schristos case TLBINV_ALL:
452b1425120Schristos tlb_invalidate_all();
453b1425120Schristos break;
454b1425120Schristos case TLBINV_ALLUSER:
455b1425120Schristos tlb_invalidate_asids(KERNEL_PID + 1, ti->ti_asid_max);
456b1425120Schristos break;
4575528d7fdSmatt #endif /* MULTIPROCESSOR && PMAP_TLB_NEED_SHOOTDOWN */
458b1425120Schristos case TLBINV_NOBODY: {
459b1425120Schristos /*
460b1425120Schristos * If we are just reclaiming ASIDs in the TLB, let's go find
461b1425120Schristos * what ASIDs are in use in the TLB. Since this is a
462b1425120Schristos * semi-expensive operation, we don't want to do it too often.
463b1425120Schristos * So if more than half of the ASIDs are in use, we don't have
464b1425120Schristos * enough free ASIDs so invalidate the TLB entries with ASIDs
465b1425120Schristos * and clear the ASID bitmap. That will force everyone to
466b1425120Schristos * allocate a new ASID.
467b1425120Schristos */
4685528d7fdSmatt #if !defined(MULTIPROCESSOR) || defined(PMAP_TLB_NEED_SHOOTDOWN)
469b1425120Schristos pmap_tlb_asid_check();
470f90211bbSjdolecek const u_int asids_found = tlb_record_asids(
471f90211bbSjdolecek ti->ti_asid_bitmap._b, ti->ti_asid_max);
472b1425120Schristos pmap_tlb_asid_check();
47351cdc2d5Sjdolecek #ifdef DIAGNOSTIC
47451cdc2d5Sjdolecek const u_int asids_count = pmap_tlb_asid_count(ti);
47551cdc2d5Sjdolecek KASSERTMSG(asids_found == asids_count,
47651cdc2d5Sjdolecek "found %u != count %u", asids_found, asids_count);
477fab98546Schristos #endif
478b1425120Schristos if (__predict_false(asids_found >= ti->ti_asid_max / 2)) {
479b1425120Schristos tlb_invalidate_asids(KERNEL_PID + 1, ti->ti_asid_max);
4805528d7fdSmatt #else /* MULTIPROCESSOR && !PMAP_TLB_NEED_SHOOTDOWN */
481b1425120Schristos /*
482a0e8e968Sskrll * For those systems (PowerPC) that don't require
483b1425120Schristos * cross cpu TLB shootdowns, we have to invalidate the
484b1425120Schristos * entire TLB because we can't record the ASIDs in use
485b1425120Schristos * on the other CPUs. This is hopefully cheaper than
486b1425120Schristos * trying to use an IPI to record all the ASIDs
487b1425120Schristos * on all the CPUs (which would be a synchronization
488b1425120Schristos * nightmare).
489b1425120Schristos */
490b1425120Schristos tlb_invalidate_all();
4915528d7fdSmatt #endif /* MULTIPROCESSOR && !PMAP_TLB_NEED_SHOOTDOWN */
492f90211bbSjdolecek TLBINFO_ASID_RESET(ti);
493f82a6142Sjdolecek ti->ti_asids_free = TLBINFO_ASID_INITIAL_FREE(
494f82a6142Sjdolecek ti->ti_asid_max);
4955528d7fdSmatt #if !defined(MULTIPROCESSOR) || defined(PMAP_TLB_NEED_SHOOTDOWN)
496b1425120Schristos } else {
497b1425120Schristos ti->ti_asids_free -= asids_found;
498b1425120Schristos }
4995528d7fdSmatt #endif /* !MULTIPROCESSOR || PMAP_TLB_NEED_SHOOTDOWN */
5001627c0aeSmatt KASSERTMSG(ti->ti_asids_free <= ti->ti_asid_max, "%u",
5011627c0aeSmatt ti->ti_asids_free);
502b1425120Schristos break;
503b1425120Schristos }
504b1425120Schristos default:
505b1425120Schristos panic("%s: unexpected op %d", __func__, op);
506b1425120Schristos }
507b1425120Schristos
508b1425120Schristos /*
509b1425120Schristos * Now go through the active ASIDs. If the ASID is on a processor or
510b1425120Schristos * we aren't invalidating all ASIDs and the TLB has an entry owned by
511b1425120Schristos * that ASID, mark it as in use. Otherwise release the ASID.
512b1425120Schristos */
513b1425120Schristos struct pmap_asid_info *pai, *next;
514b1425120Schristos for (pai = LIST_FIRST(&ti->ti_pais); pai != NULL; pai = next) {
515b1425120Schristos struct pmap * const pm = PAI_PMAP(pai, ti);
516b1425120Schristos next = LIST_NEXT(pai, pai_link);
517b1425120Schristos KASSERT(pm != pmap_kernel());
518b1425120Schristos KASSERT(pai->pai_asid > KERNEL_PID);
519b1425120Schristos #if defined(MULTIPROCESSOR)
520fe1b443aSmatt if (pmap_tlb_intersecting_onproc_p(pm, ti)) {
521b1425120Schristos if (!TLBINFO_ASID_INUSE_P(ti, pai->pai_asid)) {
522b1425120Schristos TLBINFO_ASID_MARK_USED(ti, pai->pai_asid);
523b1425120Schristos ti->ti_asids_free--;
524b1425120Schristos }
525b1425120Schristos continue;
526b1425120Schristos }
527b1425120Schristos #endif /* MULTIPROCESSOR */
528b1425120Schristos if (TLBINFO_ASID_INUSE_P(ti, pai->pai_asid)) {
529b1425120Schristos KASSERT(op == TLBINV_NOBODY);
530b1425120Schristos } else {
5315528d7fdSmatt pmap_tlb_pai_reset(ti, pai, pm);
532b1425120Schristos }
533b1425120Schristos }
534b1425120Schristos #ifdef DIAGNOSTIC
5351627c0aeSmatt size_t free_count __diagused = ti->ti_asid_max - pmap_tlb_asid_count(ti);
5361627c0aeSmatt KASSERTMSG(free_count == ti->ti_asids_free,
5371627c0aeSmatt "bitmap error: %zu != %u", free_count, ti->ti_asids_free);
538b1425120Schristos #endif
5395528d7fdSmatt UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0);
540b1425120Schristos }
541b1425120Schristos
5425528d7fdSmatt #if defined(MULTIPROCESSOR) && defined(PMAP_TLB_NEED_SHOOTDOWN)
54344cfabd5Sskrll #if PMAP_TLB_MAX == 1
544fe1b443aSmatt #error shootdown not required for single TLB systems
545fe1b443aSmatt #endif
/*
 * IPI handler for TLB shootdowns on this CPU's TLB.  Runs in interrupt
 * context at >= IPL_SCHED (asserted below).  Under the TLB lock it
 * dispatches on ti_tlbinvop: TLBINV_ONE invalidates a single victim
 * pmap's ASID (or just resets its ASID if the pmap is no longer onproc
 * here); TLBINV_ALLUSER / TLBINV_ALL reinitialize the ASID space;
 * TLBINV_ALLKERNEL flushes global entries; TLBINV_NOBODY means the work
 * was spurious or already done by a sibling SMT CPU sharing this TLB.
 * The pending-op state is cleared before the lock is dropped.
 */
546b1425120Schristos void
pmap_tlb_shootdown_process(void)547b1425120Schristos pmap_tlb_shootdown_process(void)
548b1425120Schristos {
549b1425120Schristos struct cpu_info * const ci = curcpu();
55042a7dfa3Smatt struct pmap_tlb_info * const ti = cpu_tlb_info(ci);
551b1425120Schristos
552196ee94dSskrll UVMHIST_FUNC(__func__);
553196ee94dSskrll UVMHIST_CALLED(maphist);
554196ee94dSskrll
555b1425120Schristos KASSERT(cpu_intr_p());
55657bc4fb9Sskrll KASSERTMSG(ci->ci_cpl >= IPL_SCHED, "%s: cpl (%d) < IPL_SCHED (%d)",
557b1425120Schristos __func__, ci->ci_cpl, IPL_SCHED);
558b1425120Schristos
559b1425120Schristos TLBINFO_LOCK(ti);
5602562139dSskrll UVMHIST_LOG(maphist, "ti %#jx", (uintptr_t)ti, 0, 0, 0);
561b1425120Schristos
562b1425120Schristos switch (ti->ti_tlbinvop) {
563b1425120Schristos case TLBINV_ONE: {
564b1425120Schristos /*
565b1425120Schristos * We only need to invalidate one user ASID.
566b1425120Schristos */
5672562139dSskrll UVMHIST_LOG(maphist, "TLBINV_ONE ti->ti_victim %#jx", (uintptr_t)ti->ti_victim, 0, 0, 0);
568b1425120Schristos struct pmap_asid_info * const pai = PMAP_PAI(ti->ti_victim, ti);
569b1425120Schristos KASSERT(ti->ti_victim != pmap_kernel());
5700c8273cdSskrll if (pmap_tlb_intersecting_onproc_p(ti->ti_victim, ti)) {
571e39f33a8Sskrll UVMHIST_LOG(maphist, "... onproc asid %jd", pai->pai_asid, 0, 0, 0);
572b1425120Schristos /*
573b1425120Schristos * The victim is an active pmap so we will just
574b1425120Schristos * invalidate its TLB entries.
575b1425120Schristos */
576b1425120Schristos KASSERT(pai->pai_asid > KERNEL_PID);
577b1425120Schristos pmap_tlb_asid_check();
578b1425120Schristos tlb_invalidate_asids(pai->pai_asid, pai->pai_asid);
579b1425120Schristos pmap_tlb_asid_check();
580b1425120Schristos } else if (pai->pai_asid) {
581e39f33a8Sskrll UVMHIST_LOG(maphist, "... not active asid %jd", pai->pai_asid, 0, 0, 0);
582b1425120Schristos /*
583b1425120Schristos * The victim is no longer an active pmap for this TLB.
584b1425120Schristos * So simply clear its ASID and when pmap_activate is
585b1425120Schristos * next called for this pmap, it will allocate a new
586b1425120Schristos * ASID.
587b1425120Schristos */
5885528d7fdSmatt pmap_tlb_pai_reset(ti, pai, PAI_PMAP(pai, ti));
589b1425120Schristos }
590b1425120Schristos break;
591b1425120Schristos }
592b1425120Schristos case TLBINV_ALLUSER:
593b1425120Schristos /*
594b1425120Schristos * Flush all user TLB entries.
595b1425120Schristos */
596b1425120Schristos pmap_tlb_asid_reinitialize(ti, TLBINV_ALLUSER);
597b1425120Schristos break;
598b1425120Schristos case TLBINV_ALLKERNEL:
599b1425120Schristos /*
600b1425120Schristos * We need to invalidate all global TLB entries.
601b1425120Schristos */
602b1425120Schristos pmap_tlb_asid_check();
603b1425120Schristos tlb_invalidate_globals();
604b1425120Schristos pmap_tlb_asid_check();
605b1425120Schristos break;
606b1425120Schristos case TLBINV_ALL:
607b1425120Schristos /*
608b1425120Schristos * Flush all the TLB entries (user and kernel).
609b1425120Schristos */
610b1425120Schristos pmap_tlb_asid_reinitialize(ti, TLBINV_ALL);
611b1425120Schristos break;
612b1425120Schristos case TLBINV_NOBODY:
613b1425120Schristos /*
614b1425120Schristos * Might be spurious or another SMT CPU sharing this TLB
615b1425120Schristos * could have already done the work.
616b1425120Schristos */
617b1425120Schristos break;
618b1425120Schristos }
619b1425120Schristos
620b1425120Schristos /*
621b1425120Schristos * Indicate we are done with shootdown event.
622b1425120Schristos */
623b1425120Schristos ti->ti_victim = NULL;
624b1425120Schristos ti->ti_tlbinvop = TLBINV_NOBODY;
625b1425120Schristos TLBINFO_UNLOCK(ti);
626b1425120Schristos }
627b1425120Schristos
/*
 * This state machine could be encoded into an array of integers but since all
 * the values fit in 3 bits, the 5 entry "table" fits in a 16 bit value which
 * can be loaded in a single instruction.
 *
 * TLBINV_MAP packs one 3-bit "next state" per current TLBINV_* state into a
 * single constant, then shifts/masks out the entry for the current state
 * "op".  Given a TLB's pending invalidation operation and a new request
 * (user or kernel flush), the result is the single combined operation that
 * covers both.
 */
#define TLBINV_MAP(op, nobody, one, alluser, allkernel, all)	\
	(((   (nobody) << 3 * TLBINV_NOBODY)			\
	 | (      (one) << 3 * TLBINV_ONE)			\
	 | (  (alluser) << 3 * TLBINV_ALLUSER)			\
	 | ((allkernel) << 3 * TLBINV_ALLKERNEL)		\
	 | (      (all) << 3 * TLBINV_ALL)) >> 3 * (op)) & 7

/* Combine a pending operation with a new user-ASID invalidation request. */
#define TLBINV_USER_MAP(op)	\
	TLBINV_MAP(op, TLBINV_ONE, TLBINV_ALLUSER, TLBINV_ALLUSER,	\
	    TLBINV_ALL, TLBINV_ALL)

/* Combine a pending operation with a new kernel (global) flush request. */
#define TLBINV_KERNEL_MAP(op)	\
	TLBINV_MAP(op, TLBINV_ALLKERNEL, TLBINV_ALL, TLBINV_ALL,	\
	    TLBINV_ALLKERNEL, TLBINV_ALL)
647b1425120Schristos
/*
 * Propagate pm's TLB changes to every *other* TLB on which pm is active:
 * for each such TLB, either record a pending invalidation and IPI one CPU
 * sharing it (if pm is onproc there), or simply reset pm's ASID (if pm is
 * not currently running there).  Returns true if any shootdown IPI was
 * sent.  Must be called with preemption disabled.
 */
bool
pmap_tlb_shootdown_bystanders(pmap_t pm)
{
	/*
	 * We don't need to deal with our own TLB.
	 */

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(maphist, "pm %#jx", (uintptr_t)pm, 0, 0, 0);

	KASSERT(kpreempt_disabled());

	const struct cpu_info * const ci = curcpu();

	/* Scratch cpuset: CPUs where pm is active, minus our own TLB's CPUs. */
	kcpuset_t *pm_active = ci->ci_shootdowncpus;
	kcpuset_copy(pm_active, pm->pm_active);
	kcpuset_remove(pm_active, cpu_tlb_info(curcpu())->ti_kcpuset);
	const bool kernel_p = (pm == pmap_kernel());
	bool ipi_sent = false;

	/*
	 * If pm_active gets more bits set, then it's after all our changes
	 * have been made so they will already be cognizant of them.
	 */

	for (size_t i = 0; !kcpuset_iszero(pm_active); i++) {
		KASSERT(i < pmap_ntlbs);
		struct pmap_tlb_info * const ti = pmap_tlbs[i];
		KASSERT(tlbinfo_index(ti) == i);
		UVMHIST_LOG(maphist, "ti %#jx", (uintptr_t)ti, 0, 0, 0);
		/*
		 * Skip this TLB if there are no active mappings for it.
		 */
		if (!kcpuset_intersecting_p(pm_active, ti->ti_kcpuset))
			continue;
		struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);
		kcpuset_remove(pm_active, ti->ti_kcpuset);
		TLBINFO_LOCK(ti);
		cpuid_t j = kcpuset_ffs_intersecting(pm->pm_onproc,
		    ti->ti_kcpuset);
		// post decrement since ffs returns bit + 1 or 0 if no bit
		if (j-- > 0) {
			if (kernel_p) {
				/* Kernel flush: widen the pending op. */
				ti->ti_tlbinvop =
				    TLBINV_KERNEL_MAP(ti->ti_tlbinvop);
				ti->ti_victim = NULL;
			} else {
				KASSERT(pai->pai_asid);
				if (__predict_false(ti->ti_victim == pm)) {
					KASSERT(ti->ti_tlbinvop == TLBINV_ONE);
					/*
					 * We still need to invalidate this one
					 * ASID so there's nothing to change.
					 */
				} else {
					ti->ti_tlbinvop =
					    TLBINV_USER_MAP(ti->ti_tlbinvop);
					if (ti->ti_tlbinvop == TLBINV_ONE)
						ti->ti_victim = pm;
					else
						ti->ti_victim = NULL;
				}
			}
			UVMHIST_LOG(maphist, "tlbinvop %jx victim %#jx", ti->ti_tlbinvop,
			    (uintptr_t)ti->ti_victim, 0, 0);
			TLBINFO_UNLOCK(ti);
			/*
			 * Now we can send out the shootdown IPIs to a CPU
			 * that shares this TLB and is currently using this
			 * pmap.  That CPU will process the IPI and do all
			 * the work.  Any other CPUs sharing that TLB
			 * will take advantage of that work.  pm_onproc might
			 * change now that we have released the lock but we
			 * can tolerate spurious shootdowns.
			 */
			cpu_send_ipi(cpu_lookup(j), IPI_SHOOTDOWN);
			ipi_sent = true;
			continue;
		}
		if (!pmap_tlb_intersecting_active_p(pm, ti)) {
			UVMHIST_LOG(maphist, "pm %#jx not active", (uintptr_t)pm, 0, 0, 0);
			/*
			 * If this pmap has an ASID assigned but it's not
			 * currently running, nuke its ASID.  Next time the
			 * pmap is activated, it will allocate a new ASID.
			 * And best of all, we avoid an IPI.
			 */
			KASSERT(!kernel_p);
			pmap_tlb_pai_reset(ti, pai, pm);
			//ti->ti_evcnt_lazy_shots.ev_count++;
		}
		TLBINFO_UNLOCK(ti);
	}

	UVMHIST_LOG(maphist, " <-- done (ipi_sent=%jd)", ipi_sent, 0, 0, 0);

	return ipi_sent;
}
#endif /* MULTIPROCESSOR && PMAP_TLB_NEED_SHOOTDOWN */
747b1425120Schristos
/*
 * Update (or insert, when PMAP_TLB_INSERT is set in flags) the TLB entry
 * for va in pm with the new pte on the current CPU's TLB.  Returns the
 * result of tlb_update_addr(), or -1 if pm has no valid ASID on this TLB
 * (nothing to update).  With PMAP_TLB_NEED_IPI in flags, marks pm as
 * needing a shootdown on other CPUs.  pte must be valid; preemption must
 * be disabled.
 */
int
pmap_tlb_update_addr(pmap_t pm, vaddr_t va, pt_entry_t pte, u_int flags)
{
	KASSERT(kpreempt_disabled());

	struct pmap_tlb_info * const ti = cpu_tlb_info(curcpu());
	struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);
	int rv = -1;

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(maphist, " (pm=%#jx va=%#jx, pte=%#jx flags=%#jx)",
	    (uintptr_t)pm, va, pte_value(pte), flags);

	KASSERTMSG(pte_valid_p(pte), "va %#"PRIxVADDR" %#"PRIxPTE,
	    va, pte_value(pte));

	TLBINFO_LOCK(ti);
	/* Only touch the TLB if pm is actually mapped by an ASID here. */
	if (pm == pmap_kernel() || PMAP_PAI_ASIDVALID_P(pai, ti)) {
		pmap_tlb_asid_check();
		rv = tlb_update_addr(va, pai->pai_asid, pte,
		    (flags & PMAP_TLB_INSERT) != 0);
		pmap_tlb_asid_check();
		UVMHIST_LOG(maphist,
		    " %jd <-- tlb_update_addr(%#jx, %#jx, %#jx, ...)",
		    rv, va, pai->pai_asid, pte_value(pte));
		/* An explicit insert must have succeeded (rv == 1). */
		KASSERTMSG((flags & PMAP_TLB_INSERT) == 0 || rv == 1,
		    "pmap %p (asid %u) va %#"PRIxVADDR" pte %#"PRIxPTE" rv %d",
		    pm, pai->pai_asid, va, pte_value(pte), rv);
	}
#if defined(MULTIPROCESSOR) && defined(PMAP_TLB_NEED_SHOOTDOWN)
	if (flags & PMAP_TLB_NEED_IPI)
		pm->pm_shootdown_pending = 1;
#endif
	TLBINFO_UNLOCK(ti);

	UVMHIST_LOG(maphist, " <-- done (rv=%jd)", rv, 0, 0, 0);

	return rv;
}
787b1425120Schristos
/*
 * Invalidate the TLB entry for va in pm on the current CPU's TLB (if pm
 * has a valid ASID here, or is the kernel pmap), and mark pm as needing
 * a shootdown on other CPUs.  Preemption must be disabled.
 */
void
pmap_tlb_invalidate_addr(pmap_t pm, vaddr_t va)
{
	KASSERT(kpreempt_disabled());

	struct pmap_tlb_info * const ti = cpu_tlb_info(curcpu());
	struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(maphist, " (pm=%#jx va=%#jx) ti=%#jx asid=%#jx",
	    (uintptr_t)pm, va, (uintptr_t)ti, pai->pai_asid);

	TLBINFO_LOCK(ti);
	if (pm == pmap_kernel() || PMAP_PAI_ASIDVALID_P(pai, ti)) {
		pmap_tlb_asid_check();
		UVMHIST_LOG(maphist, " invalidating %#jx asid %#jx",
		    va, pai->pai_asid, 0, 0);
		tlb_invalidate_addr(va, pai->pai_asid);
		pmap_tlb_asid_check();
	}
#if defined(MULTIPROCESSOR) && defined(PMAP_TLB_NEED_SHOOTDOWN)
	/* Other CPUs may still cache this mapping; request a shootdown. */
	pm->pm_shootdown_pending = 1;
#endif
	TLBINFO_UNLOCK(ti);
	UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0);
}
814b1425120Schristos
/*
 * Allocate a free ASID from ti for the (non-kernel) pmap pm and record it
 * in pai.  Called with the TLB info locked; the caller guarantees that pm
 * has no ASID yet and that at least one ASID is free (asserted below).
 * Also links pai onto ti's active list and updates pm_active for all CPUs
 * sharing this TLB.
 */
static inline void
pmap_tlb_asid_alloc(struct pmap_tlb_info *ti, pmap_t pm,
	struct pmap_asid_info *pai)
{
	/*
	 * We shouldn't have an ASID assigned, and thusly must not be onproc
	 * nor active.
	 */
	KASSERT(pm != pmap_kernel());
	KASSERT(pai->pai_asid == 0);
	KASSERT(pai->pai_link.le_prev == NULL);
#if defined(MULTIPROCESSOR)
	KASSERT(!pmap_tlb_intersecting_onproc_p(pm, ti));
	KASSERT(!pmap_tlb_intersecting_active_p(pm, ti));
#endif
	KASSERT(ti->ti_asids_free > 0);
	KASSERT(ti->ti_asid_hint > KERNEL_PID);

	/*
	 * If the last ASID allocated was the maximum ASID, then the
	 * hint will be out of range.  Reset the hint to first
	 * available ASID.
	 */
	if (PMAP_TLB_FLUSH_ASID_ON_RESET
	    && ti->ti_asid_hint > ti->ti_asid_max) {
		ti->ti_asid_hint = KERNEL_PID + 1;
	}
	KASSERTMSG(ti->ti_asid_hint <= ti->ti_asid_max, "hint %u",
	    ti->ti_asid_hint);

	/*
	 * Let's see if the hinted ASID is free.  If not search for
	 * a new one.
	 */
	if (__predict_true(TLBINFO_ASID_INUSE_P(ti, ti->ti_asid_hint))) {
		/* Scan the in-use bitmap word by word for a clear bit. */
		const size_t nbpw = NBBY * sizeof(ti->ti_asid_bitmap._b[0]);
		size_t i;
		u_long bits;
		for (i = 0; (bits = ~ti->ti_asid_bitmap._b[i]) == 0; i++) {
			KASSERT(i < __arraycount(ti->ti_asid_bitmap._b) - 1);
		}
		/*
		 * ffs wants to find the first bit set while we want
		 * to find the first bit cleared.
		 */
		const u_int n = __builtin_ffsl(bits) - 1;
		KASSERTMSG((bits << (nbpw - (n+1))) == (1ul << (nbpw-1)),
		    "n %u bits %#lx", n, bits);
		KASSERT(n < nbpw);
		ti->ti_asid_hint = n + i * nbpw;
	}

	KASSERT(ti->ti_asid_hint > KERNEL_PID);
	KASSERT(ti->ti_asid_hint <= ti->ti_asid_max);
	KASSERTMSG(PMAP_TLB_FLUSH_ASID_ON_RESET
	    || TLBINFO_ASID_INUSE_P(ti, ti->ti_asid_hint - 1),
	    "hint %u bitmap %p", ti->ti_asid_hint, &ti->ti_asid_bitmap);
	KASSERTMSG(!TLBINFO_ASID_INUSE_P(ti, ti->ti_asid_hint),
	    "hint %u bitmap %p", ti->ti_asid_hint, &ti->ti_asid_bitmap);

	/*
	 * The hint contains our next ASID so take it and advance the hint.
	 * Mark it as used and insert the pai into the list of active asids.
	 * There is also one less asid free in this TLB.
	 */
	pai->pai_asid = ti->ti_asid_hint++;
#ifdef MULTIPROCESSOR
	if (PMAP_TLB_FLUSH_ASID_ON_RESET) {
		/*
		 * Clean the new ASID from the TLB.
		 */
		tlb_invalidate_asids(pai->pai_asid, pai->pai_asid);
	}
#endif
	TLBINFO_ASID_MARK_USED(ti, pai->pai_asid);
	LIST_INSERT_HEAD(&ti->ti_pais, pai, pai_link);
	ti->ti_asids_free--;

#if defined(MULTIPROCESSOR)
	/*
	 * Mark that we now have an active ASID for all CPUs sharing this TLB.
	 * The bits in pm_active belonging to this TLB can only be changed
	 * while this TLBs lock is held.
	 */
#if PMAP_TLB_MAX == 1
	kcpuset_copy(pm->pm_active, kcpuset_running);
#else
	kcpuset_merge(pm->pm_active, ti->ti_kcpuset);
#endif
#endif
}
906b1425120Schristos
/*
 * Acquire a TLB address space tag (called ASID or TLBPID) and return it.
 * ASID might have already been previously acquired.
 *
 * On ASID-less TLBs this only records pm as active; otherwise, allocates
 * an ASID if pm has none (reinitializing the ASID space first if it is
 * exhausted).  When l is curlwp, also loads the ASID into the hardware
 * and marks pm onproc.  Preemption must be disabled.
 */
void
pmap_tlb_asid_acquire(pmap_t pm, struct lwp *l)
{
	KASSERT(kpreempt_disabled());

	struct cpu_info * const ci = l->l_cpu;
	struct pmap_tlb_info * const ti = cpu_tlb_info(ci);
	struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(maphist, "(pm=%#jx, l=%#jx, ti=%#jx)", (uintptr_t)pm,
	    (uintptr_t)l, (uintptr_t)ti, 0);

	/*
	 * Kernels use a fixed ASID and thus doesn't need to acquire one.
	 */
	if (pm == pmap_kernel()) {
		UVMHIST_LOG(maphist, " <-- done (kernel)", 0, 0, 0, 0);
		return;
	}

	TLBINFO_LOCK(ti);
	/* An assigned ASID and list membership must agree. */
	KASSERT(pai->pai_asid <= KERNEL_PID || pai->pai_link.le_prev != NULL);
	KASSERT(pai->pai_asid > KERNEL_PID || pai->pai_link.le_prev == NULL);
	pmap_tlb_pai_check(ti, true);

	if (__predict_false(!tlbinfo_asids_p(ti))) {
#if defined(MULTIPROCESSOR)
		/*
		 * Mark that we are active for all CPUs sharing this TLB.
		 * The bits in pm_active belonging to this TLB can only
		 * be changed while this TLBs lock is held.
		 */
#if PMAP_TLB_MAX == 1
		kcpuset_copy(pm->pm_active, kcpuset_running);
#else
		kcpuset_merge(pm->pm_active, ti->ti_kcpuset);
#endif
#endif
	} else if (__predict_false(!PMAP_PAI_ASIDVALID_P(pai, ti))) {
		/*
		 * If we've run out ASIDs, reinitialize the ASID space.
		 */
		if (__predict_false(tlbinfo_noasids_p(ti))) {
			KASSERT(l == curlwp);
			UVMHIST_LOG(maphist, " asid reinit", 0, 0, 0, 0);
			pmap_tlb_asid_reinitialize(ti, TLBINV_NOBODY);
			KASSERT(!tlbinfo_noasids_p(ti));
		}

		/*
		 * Get an ASID.
		 */
		pmap_tlb_asid_alloc(ti, pm, pai);
		UVMHIST_LOG(maphist, "allocated asid %#jx", pai->pai_asid,
		    0, 0, 0);
	}
	pmap_tlb_pai_check(ti, true);
#if defined(MULTIPROCESSOR)
	KASSERT(kcpuset_isset(pm->pm_active, cpu_index(ci)));
#endif

	if (l == curlwp) {
#if defined(MULTIPROCESSOR)
		/*
		 * The bits in pm_onproc belonging to this TLB can only
		 * be changed while this TLBs lock is held unless atomic
		 * operations are used.
		 */
		KASSERT(pm != pmap_kernel());
		kcpuset_atomic_set(pm->pm_onproc, cpu_index(ci));
#endif
		ci->ci_pmap_asid_cur = pai->pai_asid;
		UVMHIST_LOG(maphist, "setting asid to %#jx", pai->pai_asid,
		    0, 0, 0);
		tlb_set_asid(pai->pai_asid, pm);
		pmap_tlb_asid_check();
	} else {
		/*
		 * NOTE(review): l != curlwp appears to be unexpected here;
		 * this branch only logs a diagnostic -- confirm intent.
		 */
		printf("%s: l (%p) != curlwp %p\n", __func__, l, curlwp);
	}
	TLBINFO_UNLOCK(ti);
	UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0);
}
994b1425120Schristos
/*
 * Stop running with pm's ASID on the current CPU: clear our bit in
 * pm_onproc (unless pm is the kernel pmap or already fully deactivated)
 * and switch the hardware back to the kernel's fixed ASID (KERNEL_PID).
 * Preemption must be disabled.
 */
void
pmap_tlb_asid_deactivate(pmap_t pm)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(maphist, "pm %#jx", (uintptr_t)pm, 0, 0, 0);

	KASSERT(kpreempt_disabled());
#if defined(MULTIPROCESSOR)
	/*
	 * The kernel pmap is aways onproc and active and must never have
	 * those bits cleared.  If pmap_remove_all was called, it has already
	 * deactivated the pmap and thusly onproc will be 0 so there's nothing
	 * to do.
	 */
	if (pm != pmap_kernel() && !kcpuset_iszero(pm->pm_onproc)) {
		struct cpu_info * const ci = curcpu();
		KASSERT(!cpu_intr_p());
		KASSERTMSG(kcpuset_isset(pm->pm_onproc, cpu_index(ci)),
		    "%s: pmap %p onproc %p doesn't include cpu %d (%p)",
		    __func__, pm, pm->pm_onproc, cpu_index(ci), ci);
		/*
		 * The bits in pm_onproc that belong to this TLB can
		 * be changed while this TLBs lock is not held as long
		 * as we use atomic ops.
		 */
		kcpuset_atomic_clear(pm->pm_onproc, cpu_index(ci));
	}
#endif
	/* Return this CPU to the kernel's ASID. */
	curcpu()->ci_pmap_asid_cur = KERNEL_PID;
	tlb_set_asid(KERNEL_PID, pmap_kernel());

	pmap_tlb_pai_check(cpu_tlb_info(curcpu()), false);
#if defined(DEBUG)
	pmap_tlb_asid_check();
#endif
	UVMHIST_LOG(maphist, " <-- done (pm=%#jx)", (uintptr_t)pm, 0, 0, 0);
}
1032b1425120Schristos
/*
 * Release every ASID the (non-kernel) pmap pm holds, on every TLB where
 * it is active.  pm must not be onproc on any other CPU (asserted below),
 * so each per-TLB state can simply be reset under its lock.
 *
 * Note the loop braces below deliberately span the PMAP_TLB_MAX
 * conditionals: with a single TLB the "loop" body runs exactly once.
 */
void
pmap_tlb_asid_release_all(struct pmap *pm)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(maphist, "(pm=%#jx)", (uintptr_t)pm, 0, 0, 0);

	KASSERT(pm != pmap_kernel());
#if defined(MULTIPROCESSOR)
	//KASSERT(!kcpuset_iszero(pm->pm_onproc)); // XXX
	struct cpu_info * const ci __diagused = curcpu();
	KASSERT(!kcpuset_isotherset(pm->pm_onproc, cpu_index(ci)));
#if PMAP_TLB_MAX > 1
	for (u_int i = 0; !kcpuset_iszero(pm->pm_active); i++) {
		KASSERT(i < pmap_ntlbs);
		struct pmap_tlb_info * const ti = pmap_tlbs[i];
#else
		struct pmap_tlb_info * const ti = &pmap_tlb0_info;
#endif
		struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);
		TLBINFO_LOCK(ti);
		if (PMAP_PAI_ASIDVALID_P(pai, ti)) {
			/*
			 * This pmap should not be in use by any other cpu so
			 * we can just reset and be happy.
			 */
			if (ti->ti_victim == pm)
				ti->ti_victim = NULL;
			if (__predict_true(tlbinfo_asids_p(ti)))
				pmap_tlb_pai_reset(ti, pai, pm);
		}
		KASSERT(pai->pai_link.le_prev == NULL);
		TLBINFO_UNLOCK(ti);
#if PMAP_TLB_MAX > 1
	}
#endif
#ifdef DIAGNOSTIC
	/* All per-TLB ASID slots for pm must now be cleared. */
	for (size_t i = 0; i < (PMAP_TLB_MAX > 1 ? pmap_ntlbs : 1); i++) {
		KASSERTMSG(pm->pm_pai[i].pai_asid == 0,
		    "pm %p i %zu asid %u",
		    pm, i, pm->pm_pai[i].pai_asid);
	}
#endif
#else
	/*
	 * Handle the case of an UP kernel which only has, at most, one TLB.
	 * If the pmap has an ASID allocated, free it.
	 */
	struct pmap_tlb_info * const ti = &pmap_tlb0_info;
	struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);
	TLBINFO_LOCK(ti);
	if (pai->pai_asid > KERNEL_PID) {
		if (curcpu()->ci_pmap_asid_cur == pai->pai_asid) {
			/* ASID is live in hardware: just flush its entries. */
			tlb_invalidate_asids(pai->pai_asid, pai->pai_asid);
		} else {
			pmap_tlb_pai_reset(ti, pai, pm);
		}
	}
	TLBINFO_UNLOCK(ti);
#endif /* MULTIPROCESSOR */
	UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0);
}
1094b1425120Schristos
/*
 * Debug consistency check: assert that the ASID currently loaded in the
 * hardware TLB matches the current CPU's recorded ci_pmap_asid_cur.
 * A no-op unless the kernel is built with DEBUG.
 */
void
pmap_tlb_asid_check(void)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLED(maphist);

#ifdef DEBUG
	kpreempt_disable();
	const tlb_asid_t asid __debugused = tlb_get_asid();
	UVMHIST_LOG(maphist, " asid %u vs pmap_cur_asid %u", asid,
	    curcpu()->ci_pmap_asid_cur, 0, 0);
	KDASSERTMSG(asid == curcpu()->ci_pmap_asid_cur,
	    "%s: asid (%#x) != current asid (%#x)",
	    __func__, asid, curcpu()->ci_pmap_asid_cur);
	kpreempt_enable();
#endif
	UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0);
}
1113b1425120Schristos
#ifdef DEBUG
/*
 * Debug helper: walk the current CPU's TLB, applying func to each entry,
 * but only when pm is the kernel pmap or holds a valid (> KERNEL_PID)
 * ASID on this TLB.  The walk runs with the TLB info locked.
 */
void
pmap_tlb_check(pmap_t pm, bool (*func)(void *, vaddr_t, tlb_asid_t, pt_entry_t))
{
	struct pmap_tlb_info * const ti = cpu_tlb_info(curcpu());
	struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);
	TLBINFO_LOCK(ti);
	if (pm == pmap_kernel() || pai->pai_asid > KERNEL_PID)
		tlb_walk(pm, func);
	TLBINFO_UNLOCK(ti);
}
#endif /* DEBUG */
1126196ee94dSskrll
#ifdef DDB
/*
 * DDB helper: print pm's ASID for each TLB via the supplied printf-like
 * callback.  With a single TLB (UP, or PMAP_TLB_MAX == 1) only the one
 * ASID is shown; otherwise one line per TLB.
 */
void
pmap_db_tlb_print(struct pmap *pm,
    void (*pr)(const char *, ...) __printflike(1, 2))
{
#if !defined(MULTIPROCESSOR) || PMAP_TLB_MAX == 1
	(*pr)(" asid %5u\n", pm->pm_pai[0].pai_asid);
#else
	const size_t ntlb = (PMAP_TLB_MAX > 1 ? pmap_ntlbs : 1);
	size_t tlbno = 0;
	while (tlbno < ntlb) {
		(*pr)(" tlb %zu asid %5u\n", tlbno, pm->pm_pai[tlbno].pai_asid);
		tlbno++;
	}
#endif
}
#endif /* DDB */
1141