/*-
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: pmap_synci.c,v 1.5 2020/04/13 08:05:22 skrll Exp $");

#define __PMAP_PRIVATE

#include "opt_multiprocessor.h"

#include <sys/param.h>

#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/mutex.h>
#include <sys/systm.h>

#include <uvm/uvm.h>

#if defined(MULTIPROCESSOR)
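/*
 * As used below: pmap_tlb_synci_page_mask masks a VA's page index down to
 * a bit position in the per-TLB synci page bitmap, and pmap_tlb_synci_map_mask
 * is that bitmap with every valid bit set ("every page index needs syncing").
 */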
u_int pmap_tlb_synci_page_mask;
u_int pmap_tlb_synci_map_mask;

void
pmap_tlb_syncicache_ast(struct cpu_info *ci)
{
	struct pmap_tlb_info * const ti = cpu_tlb_info(ci);

	KASSERT(kpreempt_disabled());

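	/*
	 * Atomically grab and clear the set of pending page indices for
	 * this TLB, so a request that arrives after this point raises a
	 * fresh AST rather than being lost.
	 */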
	uint32_t page_bitmap = atomic_swap_32(&ti->ti_synci_page_bitmap, 0);
#if 0
	printf("%s: need to sync %#x\n", __func__, page_bitmap);
#endif
	ti->ti_evcnt_synci_asts.ev_count++;
	/*
	 * If every bit is set in the bitmap, sync the entire icache.
	 */
	if (page_bitmap == pmap_tlb_synci_map_mask) {
		pmap_md_icache_sync_all();
		ti->ti_evcnt_synci_all.ev_count++;
		ti->ti_evcnt_synci_pages.ev_count += pmap_tlb_synci_page_mask+1;
		return;
	}

	/*
	 * Loop through the bitmap clearing each set of indices for each page.
	 */
	for (vaddr_t va = 0;
	     page_bitmap != 0;
	     page_bitmap >>= 1, va += PAGE_SIZE) {
		if (page_bitmap & 1) {
			/*
			 * Each bit set represents a page index to be synced.
			 */
			pmap_md_icache_sync_range_index(va, PAGE_SIZE);
			ti->ti_evcnt_synci_pages.ev_count++;
		}
	}
}

void
pmap_tlb_syncicache(vaddr_t va, const kcpuset_t *page_onproc)
{
	KASSERT(kpreempt_disabled());
	/*
	 * We don't sync the icache here but let the ast do it for us just
	 * before returning to userspace.  We do this because we don't really
	 * know on which CPU we will return to userspace, and if we sync the
	 * icache now it might not be on the CPU we need it on.  In addition,
	 * other threads might sync the icache before we get to return to
	 * userland so there's no reason for us to do it.
	 *
	 * Each TLB/cache keeps a synci sequence number which gets advanced
	 * each time that TLB/cache performs a pmap_md_sync_icache_all.  When
	 * we return to userland, we check the pmap's corresponding synci
	 * sequence number for that TLB/cache.  If they match, it means that
	 * no one has yet synced the icache, so we must do it ourselves.  If
	 * they don't match, someone has already synced the icache for us.
	 *
	 * There is a small chance that the sequence numbers will wrap and
	 * then become equal, but that's a one in 4 billion chance and will
	 * just cause an extra sync of the icache.
	 */
	struct cpu_info * const ci = curcpu();
	kcpuset_t *onproc;
	kcpuset_create(&onproc, true);
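	/*
	 * Reduce the VA to its page index (the bits covered by
	 * pmap_tlb_synci_page_mask) and turn that index into a single bit
	 * of the per-TLB synci bitmap.
	 */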
	const uint32_t page_mask =
	    1L << ((va >> PGSHIFT) & pmap_tlb_synci_page_mask);
	for (size_t i = 0; i < pmap_ntlbs; i++) {
		struct pmap_tlb_info * const ti = pmap_tlbs[i];
		TLBINFO_LOCK(ti);
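		/*
		 * Merge our page bit into this TLB's synci bitmap with a
		 * CAS loop: the AST handler clears the bitmap with
		 * atomic_swap_32() without taking the TLB lock, so a plain
		 * read-modify-write would not be safe here.
		 */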
		for (;;) {
			uint32_t old_page_bitmap = ti->ti_synci_page_bitmap;
			if (old_page_bitmap & page_mask) {
				ti->ti_evcnt_synci_duplicate.ev_count++;
				break;
			}

			uint32_t orig_page_bitmap = atomic_cas_32(
			    &ti->ti_synci_page_bitmap, old_page_bitmap,
			    old_page_bitmap | page_mask);

			if (orig_page_bitmap == old_page_bitmap) {
				if (old_page_bitmap == 0) {
					kcpuset_merge(onproc, ti->ti_kcpuset);
				} else {
					ti->ti_evcnt_synci_deferred.ev_count++;
				}
				ti->ti_evcnt_synci_desired.ev_count++;
				break;
			}
		}
#if 0
		printf("%s: %s: %x to %x on cpus %#x\n", __func__,
		    ti->ti_name, page_mask, ti->ti_synci_page_bitmap,
		    onproc & page_onproc & ti->ti_cpu_mask);
#endif
		TLBINFO_UNLOCK(ti);
	}
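	/*
	 * Restrict the notification set to CPUs that both own a TLB we just
	 * flagged above and appear in page_onproc.
	 */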
	kcpuset_intersect(onproc, page_onproc);
	if (__predict_false(!kcpuset_iszero(onproc))) {
		/*
		 * If this cpu needs to sync this page, tell the current lwp
		 * to sync the icache before it returns to userspace.
		 */
		if (kcpuset_isset(onproc, cpu_index(ci))) {
			if (ci->ci_flags & CPUF_USERPMAP) {
				curlwp->l_md.md_astpending = 1; /* force call to ast() */
				ci->ci_evcnt_synci_onproc_rqst.ev_count++;
			} else {
				ci->ci_evcnt_synci_deferred_rqst.ev_count++;
			}
			kcpuset_clear(onproc, cpu_index(ci));
		}

		/*
		 * For each CPU that is affected, send an IPI telling
		 * that CPU that it needs to sync its icache.
		 * We might cause some spurious icache syncs but that's not
		 * going to break anything.
		 */
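		/*
		 * kcpuset_ffs() returns a 1-based CPU index (0 when the set
		 * is empty), so "n-- > 0" both tests for an empty set and
		 * converts n to the 0-based index used below.
		 */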
		for (cpuid_t n = kcpuset_ffs(onproc);
		    n-- > 0;
		    n = kcpuset_ffs(onproc)) {
			kcpuset_clear(onproc, n);
			cpu_send_ipi(cpu_lookup(n), IPI_SYNCICACHE);
		}
	}
	kcpuset_destroy(onproc);
}

void
pmap_tlb_syncicache_wanted(struct cpu_info *ci)
{
	struct pmap_tlb_info * const ti = cpu_tlb_info(ci);

	KASSERT(cpu_intr_p());

	TLBINFO_LOCK(ti);

	/*
	 * We might have been notified because another CPU changed an exec
	 * page and now needs us to sync the icache, so tell the current lwp
	 * to do so the next time it returns to userland (which should be
	 * very soon).
	 */
	if (ti->ti_synci_page_bitmap && (ci->ci_flags & CPUF_USERPMAP)) {
		curlwp->l_md.md_astpending = 1; /* force call to ast() */
		ci->ci_evcnt_synci_ipi_rqst.ev_count++;
	}

	TLBINFO_UNLOCK(ti);
}
#endif /* MULTIPROCESSOR */