/*	$NetBSD: subr_pserialize.c,v 1.6 2013/01/07 23:21:32 rmind Exp $	*/

/*-
 * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Passive serialization.
 *
 * The implementation accurately matches the lapsed US patent 4809168,
 * therefore the code is patent-free in the United States.  Your use of
 * this code is at your own risk.
 *
 * Note for NetBSD developers: all changes to this source file must be
 * approved by the <core>.
 */
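
/*
 * Usage sketch (illustrative only; the frob/frobs/frob_lock/frob_psz
 * names are hypothetical, not part of this file).  Readers traverse
 * the data structure with no locks held and must not sleep; an updater
 * unlinks an element, waits for all readers to drain with
 * pserialize_perform(), and only then frees it:
 *
 *	int s = pserialize_read_enter();
 *	struct frob *f = LIST_FIRST(&frobs);	// lock-free lookup
 *	// ... use f; do not sleep here ...
 *	pserialize_read_exit(s);
 *
 *	mutex_enter(&frob_lock);		// serialize updaters
 *	LIST_REMOVE(f, f_entry);		// readers may still hold f
 *	pserialize_perform(frob_psz);		// wait out the readers
 *	mutex_exit(&frob_lock);
 *	kmem_free(f, sizeof(*f));		// now safe to free
 */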

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_pserialize.c,v 1.6 2013/01/07 23:21:32 rmind Exp $");

#include <sys/param.h>

#include <sys/condvar.h>
#include <sys/cpu.h>
#include <sys/evcnt.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/pserialize.h>
#include <sys/queue.h>
#include <sys/xcall.h>

struct pserialize {
	TAILQ_ENTRY(pserialize)	psz_chain;	/* link on a checkpoint queue */
	lwp_t *			psz_owner;	/* LWP performing the update */
	kcondvar_t		psz_notifier;	/* signalled when update is safe */
	kcpuset_t *		psz_target;	/* CPUs which must pass through */
	kcpuset_t *		psz_pass;	/* CPUs which have passed so far */
};

static u_int			psz_work_todo	__cacheline_aligned;
static kmutex_t			psz_lock	__cacheline_aligned;
static struct evcnt		psz_ev_excl	__cacheline_aligned;

/*
 * Checkpoint queues, as defined in "Method 1" of the patent:
 *	q0: "0 MP checkpoints have occurred".
 *	q1: "1 MP checkpoint has occurred".
 *	q2: "2 MP checkpoints have occurred".
 * A request migrates q0 -> q1 -> q2 once every running CPU has passed
 * through a context switch checkpoint.
 */
static TAILQ_HEAD(, pserialize)	psz_queue0	__cacheline_aligned;
static TAILQ_HEAD(, pserialize)	psz_queue1	__cacheline_aligned;
static TAILQ_HEAD(, pserialize)	psz_queue2	__cacheline_aligned;

/*
 * pserialize_init:
 *
 *	Initialize passive serialization structures.
 */
void
pserialize_init(void)
{

	psz_work_todo = 0;
	TAILQ_INIT(&psz_queue0);
	TAILQ_INIT(&psz_queue1);
	TAILQ_INIT(&psz_queue2);
	mutex_init(&psz_lock, MUTEX_DEFAULT, IPL_SCHED);
	evcnt_attach_dynamic(&psz_ev_excl, EVCNT_TYPE_MISC, NULL,
	    "pserialize", "exclusive access");
}

/*
 * pserialize_create:
 *
 *	Create and initialize a passive serialization object.
 */
pserialize_t
pserialize_create(void)
{
	pserialize_t psz;

	psz = kmem_zalloc(sizeof(struct pserialize), KM_SLEEP);
	cv_init(&psz->psz_notifier, "psrlz");
	kcpuset_create(&psz->psz_target, true);
	kcpuset_create(&psz->psz_pass, true);
	psz->psz_owner = NULL;

	return psz;
}

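/*
 * Lifecycle sketch (illustrative only; the softc names are hypothetical):
 *
 *	sc->sc_psz = pserialize_create();
 *	// ... perform updates covered by sc->sc_psz ...
 *	pserialize_destroy(sc->sc_psz);
 */
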
/*
 * pserialize_destroy:
 *
 *	Destroy a passive serialization object.
 */
void
pserialize_destroy(pserialize_t psz)
{

	KASSERT(psz->psz_owner == NULL);

	cv_destroy(&psz->psz_notifier);
	kcpuset_destroy(psz->psz_target);
	kcpuset_destroy(psz->psz_pass);
	kmem_free(psz, sizeof(struct pserialize));
}

/*
 * pserialize_perform:
 *
 *	Perform the write side of passive serialization.  The calling
 *	thread holds an exclusive lock on the data object(s) being updated.
 *	We wait until every processor in the system has made at least two
 *	passes through cpu_switchto().  The wait is made with the caller's
 *	update lock held, but is short term.
 */
void
pserialize_perform(pserialize_t psz)
{
	uint64_t xc;

	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());

	if (__predict_false(panicstr != NULL)) {
		return;
	}
	KASSERT(psz->psz_owner == NULL);
	KASSERT(ncpu > 0);

	/*
	 * Set up the object and put it onto the queue.  The lock
	 * activity here provides the necessary memory barrier to
	 * make the caller's data update completely visible to
	 * other processors.
	 */
	psz->psz_owner = curlwp;
	kcpuset_copy(psz->psz_target, kcpuset_running);
	kcpuset_zero(psz->psz_pass);

	mutex_spin_enter(&psz_lock);
	TAILQ_INSERT_TAIL(&psz_queue0, psz, psz_chain);
	psz_work_todo++;
	mutex_spin_exit(&psz_lock);

	/*
	 * Force some context switch activity on every CPU, as the
	 * system may not be busy.  Note: every CPU must pass the
	 * checkpoint twice.
	 */
	xc = xc_broadcast(XC_HIGHPRI, (xcfunc_t)nullop, NULL, NULL);
	xc_wait(xc);

	/* No need to xc_wait() here: we block on our own condvar below. */
	xc_broadcast(XC_HIGHPRI, (xcfunc_t)nullop, NULL, NULL);

	/*
	 * Wait for all CPUs to cycle through mi_switch() twice.
	 * The last one through will remove our update from the
	 * queue and awaken us.
	 */
	mutex_spin_enter(&psz_lock);
	while (!kcpuset_iszero(psz->psz_target)) {
		cv_wait(&psz->psz_notifier, &psz_lock);
	}
	psz_ev_excl.ev_count++;
	mutex_spin_exit(&psz_lock);

	psz->psz_owner = NULL;
}

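/*
 * pserialize_read_enter:
 *
 *	Enter a passive serialization read section: raise the IPL to
 *	IPL_SOFTSERIAL, blocking soft interrupts (and with them the
 *	serialization cross-calls) on this CPU.  Returns the previous
 *	IPL, to be passed to the matching pserialize_read_exit().
 */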
int
pserialize_read_enter(void)
{

	KASSERT(!cpu_intr_p());
	return splsoftserial();
}

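/*
 * pserialize_read_exit:
 *
 *	Exit a passive serialization read section: restore the IPL
 *	returned by the matching pserialize_read_enter().
 */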
void
pserialize_read_exit(int s)
{

	splx(s);
}

/*
 * pserialize_switchpoint:
 *
 *	Monitor system context switch activity.  Called from machine
 *	independent code after mi_switch() returns.
 */
void
pserialize_switchpoint(void)
{
	pserialize_t psz, next;
	cpuid_t cid;

	/*
	 * If no updates pending, bail out.  No need to lock in order to
	 * test psz_work_todo; the only ill effect of missing an update
	 * would be to delay LWPs waiting in pserialize_perform().  That
	 * will not happen because updates are on the queue before an
	 * xcall is generated (serialization) to tickle every CPU.
	 */
	if (__predict_true(psz_work_todo == 0)) {
		return;
	}
	mutex_spin_enter(&psz_lock);
	cid = cpu_index(curcpu());

	/*
	 * First, scan through the second queue (q1): update each request
	 * and, if it has now passed all processors, transfer it to the
	 * third queue (q2).
	 */
	for (psz = TAILQ_FIRST(&psz_queue1); psz != NULL; psz = next) {
		next = TAILQ_NEXT(psz, psz_chain);
		if (!kcpuset_match(psz->psz_pass, psz->psz_target)) {
			kcpuset_set(psz->psz_pass, cid);
			continue;
		}
		kcpuset_zero(psz->psz_pass);
		TAILQ_REMOVE(&psz_queue1, psz, psz_chain);
		TAILQ_INSERT_TAIL(&psz_queue2, psz, psz_chain);
	}
	/*
	 * Next, scan through the first queue (q0): update each request
	 * and, if it has now passed all processors, move it to the
	 * second queue (q1).
	 */
	for (psz = TAILQ_FIRST(&psz_queue0); psz != NULL; psz = next) {
		next = TAILQ_NEXT(psz, psz_chain);
		if (!kcpuset_match(psz->psz_pass, psz->psz_target)) {
			kcpuset_set(psz->psz_pass, cid);
			continue;
		}
		kcpuset_zero(psz->psz_pass);
		TAILQ_REMOVE(&psz_queue0, psz, psz_chain);
		TAILQ_INSERT_TAIL(&psz_queue1, psz, psz_chain);
	}
	/*
	 * Finally, process the third queue (q2): its entries have been
	 * seen twice by every processor, so remove them from the queue
	 * and notify the updating thread.
	 */
	while ((psz = TAILQ_FIRST(&psz_queue2)) != NULL) {
		TAILQ_REMOVE(&psz_queue2, psz, psz_chain);
		kcpuset_zero(psz->psz_target);
		cv_signal(&psz->psz_notifier);
		psz_work_todo--;
	}
	mutex_spin_exit(&psz_lock);
}
273