/*-
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
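
/*
 * pmap_synci.c: deferred icache synchronization for the MI pmap on
 * MULTIPROCESSOR kernels.  Pages whose contents have changed are
 * recorded in a per-TLB pending bitmap and the icache is synced
 * lazily, either from the AST path just before returning to userspace
 * or in response to an IPI from another CPU.
 */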

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: pmap_synci.c,v 1.5 2020/04/13 08:05:22 skrll Exp $");

#define __PMAP_PRIVATE

#include "opt_multiprocessor.h"

#include <sys/param.h>

#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/mutex.h>
#include <sys/systm.h>

#include <uvm/uvm.h>

#if defined(MULTIPROCESSOR)
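/*
 * pmap_tlb_synci_page_mask selects the low bits of a page index used to
 * pick a slot in a TLB's synci page bitmap; pmap_tlb_synci_map_mask has
 * a bit set for every slot, i.e. the "sync everything" value.
 */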
u_int	pmap_tlb_synci_page_mask;
u_int	pmap_tlb_synci_map_mask;

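/*
 * Called from the AST path with preemption disabled: pull the set of
 * pending page indices off this CPU's TLB info and sync the icache for
 * each of them (or the whole icache if every index is pending).
 */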
void
pmap_tlb_syncicache_ast(struct cpu_info *ci)
{
	struct pmap_tlb_info * const ti = cpu_tlb_info(ci);

	KASSERT(kpreempt_disabled());

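	/*
	 * Atomically claim and clear the set of pending page indices so
	 * that concurrent pmap_tlb_syncicache() calls start a new round.
	 */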
	uint32_t page_bitmap = atomic_swap_32(&ti->ti_synci_page_bitmap, 0);
#if 0
	printf("%s: need to sync %#x\n", __func__, page_bitmap);
#endif
	ti->ti_evcnt_synci_asts.ev_count++;
	/*
	 * If every bit is set in the bitmap, sync the entire icache.
	 */
	if (page_bitmap == pmap_tlb_synci_map_mask) {
		pmap_md_icache_sync_all();
		ti->ti_evcnt_synci_all.ev_count++;
		ti->ti_evcnt_synci_pages.ev_count += pmap_tlb_synci_page_mask+1;
		return;
	}

	/*
	 * Loop through the bitmap, syncing the icache for each page
	 * index whose bit is set.
	 */
	for (vaddr_t va = 0;
	     page_bitmap != 0;
	     page_bitmap >>= 1, va += PAGE_SIZE) {
		if (page_bitmap & 1) {
			/*
			 * Each bit set represents a page index to be synced.
			 */
			pmap_md_icache_sync_range_index(va, PAGE_SIZE);
			ti->ti_evcnt_synci_pages.ev_count++;
		}
	}
}

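/*
 * Record that the page containing va needs its icache synced and
 * arrange for the CPUs in page_onproc to do so before they next return
 * to userspace, either by posting an AST locally or by sending an
 * IPI_SYNCICACHE to remote CPUs.
 */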
void
pmap_tlb_syncicache(vaddr_t va, const kcpuset_t *page_onproc)
{
	KASSERT(kpreempt_disabled());
	/*
	 * We don't sync the icache here but let the ast do it for us just
	 * before returning to userspace.  We do this because we don't really
	 * know on which CPU we will return to userspace, and if we sync the
	 * icache now it might not be on the CPU we need it on.  In addition,
	 * other threads might sync the icache before we get to return to
	 * userland so there's no reason for us to do it.
	 *
	 * Each TLB/cache keeps a synci sequence number which gets advanced
	 * each time that TLB/cache performs a pmap_md_icache_sync_all.  When
	 * we return to userland, we check the pmap's corresponding synci
	 * sequence number for that TLB/cache.  If they match, it means that
	 * no one has yet synced the icache so we must do it ourselves.  If
	 * they don't match, someone has already synced the icache for us.
	 *
	 * There is a small chance that the generation numbers will wrap and
	 * then become equal, but that's a one in 4 billion chance and will
	 * just cause an extra sync of the icache.
	 */
	struct cpu_info * const ci = curcpu();
	kcpuset_t *onproc;
	kcpuset_create(&onproc, true);
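	/*
	 * Map the page's index into a single bit of the synci bitmap; pages
	 * whose indices are congruent modulo the bitmap size share a bit.
	 */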
	const uint32_t page_mask =
	    1L << ((va >> PGSHIFT) & pmap_tlb_synci_page_mask);
	for (size_t i = 0; i < pmap_ntlbs; i++) {
		struct pmap_tlb_info * const ti = pmap_tlbs[i];
		TLBINFO_LOCK(ti);
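		/*
		 * Atomically set this page's bit in the TLB's pending bitmap,
		 * retrying if another updater changes the bitmap underneath
		 * us.  If the bit is already set, someone has already
		 * requested a sync for this page index.
		 */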
		for (;;) {
			uint32_t old_page_bitmap = ti->ti_synci_page_bitmap;
			if (old_page_bitmap & page_mask) {
				ti->ti_evcnt_synci_duplicate.ev_count++;
				break;
			}

			uint32_t orig_page_bitmap = atomic_cas_32(
			    &ti->ti_synci_page_bitmap, old_page_bitmap,
			    old_page_bitmap | page_mask);

			if (orig_page_bitmap == old_page_bitmap) {
				if (old_page_bitmap == 0) {
					kcpuset_merge(onproc, ti->ti_kcpuset);
				} else {
					ti->ti_evcnt_synci_deferred.ev_count++;
				}
				ti->ti_evcnt_synci_desired.ev_count++;
				break;
			}
		}
#if 0
		printf("%s: %s: %x to %x on cpus %#x\n", __func__,
		    ti->ti_name, page_mask, ti->ti_synci_page_bitmap,
		     onproc & page_onproc & ti->ti_cpu_mask);
#endif
		TLBINFO_UNLOCK(ti);
	}
	kcpuset_intersect(onproc, page_onproc);
	if (__predict_false(!kcpuset_iszero(onproc))) {
		/*
		 * If this cpu needs to sync this page, tell the current lwp
		 * to sync the icache before it returns to userspace.
		 */
		if (kcpuset_isset(onproc, cpu_index(ci))) {
			if (ci->ci_flags & CPUF_USERPMAP) {
				curlwp->l_md.md_astpending = 1;	/* force call to ast() */
				ci->ci_evcnt_synci_onproc_rqst.ev_count++;
			} else {
				ci->ci_evcnt_synci_deferred_rqst.ev_count++;
			}
			kcpuset_clear(onproc, cpu_index(ci));
		}

		/*
		 * For each cpu that is affected, send an IPI telling that CPU
		 * that its current lwp needs to sync its icache.  We might
		 * cause some spurious icache syncs but that's not going to
		 * break anything.
		 */
		for (cpuid_t n = kcpuset_ffs(onproc);
		     n-- > 0;
		     n = kcpuset_ffs(onproc)) {
			kcpuset_clear(onproc, n);
			cpu_send_ipi(cpu_lookup(n), IPI_SYNCICACHE);
		}
	}
	kcpuset_destroy(onproc);
}

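/*
 * Called from interrupt context when another CPU has asked us (via
 * IPI_SYNCICACHE) to sync our icache: post an AST if the current lwp
 * has a user pmap and so will return to userland soon.
 */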
void
pmap_tlb_syncicache_wanted(struct cpu_info *ci)
{
	struct pmap_tlb_info * const ti = cpu_tlb_info(ci);

	KASSERT(cpu_intr_p());

	TLBINFO_LOCK(ti);

	/*
	 * We might have been notified because another CPU changed an exec
	 * page and now needs us to sync the icache, so tell the current lwp
	 * to do so the next time it returns to userland (which should be
	 * very soon).
	 */
	if (ti->ti_synci_page_bitmap && (ci->ci_flags & CPUF_USERPMAP)) {
		curlwp->l_md.md_astpending = 1;	/* force call to ast() */
		ci->ci_evcnt_synci_ipi_rqst.ev_count++;
	}

	TLBINFO_UNLOCK(ti);
}
#endif /* MULTIPROCESSOR */