/*	$NetBSD: subr_pserialize.c,v 1.24 2023/10/04 20:28:06 ad Exp $	*/

/*-
 * Copyright (c) 2010, 2011, 2023 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Passive serialization.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_pserialize.c,v 1.24 2023/10/04 20:28:06 ad Exp $");

#include <sys/param.h>

#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/evcnt.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/pserialize.h>
#include <sys/xcall.h>

struct pserialize {
	char			psz_dummy;
};

static kmutex_t			psz_lock	__cacheline_aligned;
static struct evcnt		psz_ev_excl	__cacheline_aligned =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pserialize", "exclusive access");
EVCNT_ATTACH_STATIC(psz_ev_excl);

/*
 * pserialize_init:
 *
 *	Initialize passive serialization structures.
 */
void
pserialize_init(void)
{

	mutex_init(&psz_lock, MUTEX_DEFAULT, IPL_NONE);
}

/*
 * pserialize_create:
 *
 *	Create and initialize a passive serialization object.
 */
pserialize_t
pserialize_create(void)
{
	pserialize_t psz;

	psz = kmem_zalloc(sizeof(*psz), KM_SLEEP);
	return psz;
}

/*
 * pserialize_destroy:
 *
 *	Destroy a passive serialization object.
 */
void
pserialize_destroy(pserialize_t psz)
{

	kmem_free(psz, sizeof(*psz));
}
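
/*
 * A minimal lifecycle sketch (illustrative only; "frobber" and its
 * members are hypothetical names, not defined in this file): a
 * subsystem typically creates one pserialize object when it is set
 * up and destroys it when it is torn down, alongside the mutex that
 * serializes its writers:
 *
 *	struct frobber {
 *		kmutex_t	frob_lock;
 *		pserialize_t	frob_psz;
 *	} frobber;
 *
 *	frobber.frob_psz = pserialize_create();
 *	...
 *	pserialize_destroy(frobber.frob_psz);
 */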

/*
 * pserialize_perform:
 *
 *	Perform the write side of passive serialization.
 */
void
pserialize_perform(pserialize_t psz)
{

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());

	if (__predict_false(panicstr != NULL)) {
		return;
	}

	if (__predict_false(mp_online == false)) {
		psz_ev_excl.ev_count++;
		return;
	}

	/*
	 * Broadcast a NOP to all CPUs and wait until all of them complete.
	 */
	xc_barrier(XC_HIGHPRI);

	mutex_enter(&psz_lock);
	psz_ev_excl.ev_count++;
	mutex_exit(&psz_lock);
}
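
/*
 * Write-side sketch (illustrative only; frobber, frob_list and struct
 * frob are hypothetical): a writer unlinks the object under its own
 * lock so that new readers cannot find it, waits for any readers that
 * may still be looking at it, and only then frees the memory.  The
 * list links themselves must be published with properly ordered
 * stores, e.g. atomic_store_release() paired with atomic_load_consume()
 * on the read side, or a pserialize-safe list:
 *
 *	mutex_enter(&frobber.frob_lock);
 *	LIST_REMOVE(fo, fo_entry);
 *	pserialize_perform(frobber.frob_psz);
 *	mutex_exit(&frobber.frob_lock);
 *
 *	kmem_free(fo, sizeof(*fo));
 */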

int
pserialize_read_enter(void)
{
	int s;

	s = splsoftserial();
	curcpu()->ci_psz_read_depth++;
	__insn_barrier();
	return s;
}

void
pserialize_read_exit(int s)
{

	KASSERT(__predict_false(cold) || kpreempt_disabled());

	__insn_barrier();
	if (__predict_false(curcpu()->ci_psz_read_depth-- == 0))
		panic("mismatching pserialize_read_exit()");
	splx(s);
}
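
/*
 * Read-side sketch (illustrative only; frob_list, fo_entry, fo_key and
 * frob_found() are hypothetical): a reader brackets its lockless lookup
 * with pserialize_read_enter()/pserialize_read_exit() and must not
 * sleep or block inside the section; to keep using the object after
 * the section ends it needs a stronger reference, e.g. psref(9):
 *
 *	int s;
 *
 *	s = pserialize_read_enter();
 *	LIST_FOREACH(fo, &frobber.frob_list, fo_entry) {
 *		if (fo->fo_key == key) {
 *			frob_found(fo);
 *			break;
 *		}
 *	}
 *	pserialize_read_exit(s);
 */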

/*
 * pserialize_in_read_section:
 *
 *	True if the caller is in a pserialize read section.  To be used
 *	only for diagnostic assertions where we want to guarantee the
 *	condition like:
 *
 *		KASSERT(pserialize_in_read_section());
 */
bool
pserialize_in_read_section(void)
{

	return kpreempt_disabled() && curcpu()->ci_psz_read_depth > 0;
}

/*
 * pserialize_not_in_read_section:
 *
 *	True if the caller is not in a pserialize read section.  To be
 *	used only for diagnostic assertions where we want to guarantee
 *	the condition like:
 *
 *		KASSERT(pserialize_not_in_read_section());
 */
bool
pserialize_not_in_read_section(void)
{
	bool notin;
	long pctr;

	pctr = lwp_pctr();
	notin = __predict_true(curcpu()->ci_psz_read_depth == 0);

	/*
	 * If we had a context switch, we're definitely not in a
	 * pserialize read section because pserialize read sections
	 * block preemption.
	 */
	if (__predict_false(pctr != lwp_pctr()))
		notin = true;

	return notin;
}
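
/*
 * Assertion sketch (illustrative only; frob_update() and frob_lock are
 * hypothetical): code that may sleep, for instance to take an adaptive
 * lock or to allocate with KM_SLEEP, can use these predicates to catch
 * callers that are still inside a read section:
 *
 *	void
 *	frob_update(struct frobber *frob)
 *	{
 *
 *		KASSERT(pserialize_not_in_read_section());
 *		mutex_enter(&frob->frob_lock);
 *		...
 *		mutex_exit(&frob->frob_lock);
 *	}
 */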
193