/*	$NetBSD: linux_rcu.c,v 1.1 2021/12/19 01:33:17 riastradh Exp $	*/

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_rcu.c,v 1.1 2021/12/19 01:33:17 riastradh Exp $");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/condvar.h>
#include <sys/cpu.h>
#include <sys/kthread.h>
#include <sys/mutex.h>
#include <sys/sdt.h>
#include <sys/xcall.h>

#include <linux/rcupdate.h>
#include <linux/slab.h>

SDT_PROBE_DEFINE0(sdt, linux, rcu, synchronize__start);
SDT_PROBE_DEFINE1(sdt, linux, rcu, synchronize__cpu, "unsigned"/*cpu*/);
SDT_PROBE_DEFINE0(sdt, linux, rcu, synchronize__done);
SDT_PROBE_DEFINE0(sdt, linux, rcu, barrier__start);
SDT_PROBE_DEFINE0(sdt, linux, rcu, barrier__done);
SDT_PROBE_DEFINE2(sdt, linux, rcu, call__queue,
    "struct rcu_head *"/*head*/, "void (*)(struct rcu_head *)"/*callback*/);
SDT_PROBE_DEFINE2(sdt, linux, rcu, call__run,
    "struct rcu_head *"/*head*/, "void (*)(struct rcu_head *)"/*callback*/);
SDT_PROBE_DEFINE2(sdt, linux, rcu, call__done,
    "struct rcu_head *"/*head*/, "void (*)(struct rcu_head *)"/*callback*/);
SDT_PROBE_DEFINE2(sdt, linux, rcu, kfree__queue,
    "struct rcu_head *"/*head*/, "void *"/*obj*/);
SDT_PROBE_DEFINE2(sdt, linux, rcu, kfree__free,
    "struct rcu_head *"/*head*/, "void *"/*obj*/);
SDT_PROBE_DEFINE2(sdt, linux, rcu, kfree__done,
    "struct rcu_head *"/*head*/, "void *"/*obj*/);

static struct {
	kmutex_t	lock;
	kcondvar_t	cv;
	struct rcu_head	*first_callback;
	struct rcu_head	*first_kfree;
	struct lwp	*lwp;
	uint64_t	gen;
	bool		dying;
} gc __cacheline_aligned;

static void
synchronize_rcu_xc(void *a, void *b)
{

	SDT_PROBE1(sdt, linux, rcu, synchronize__cpu,  cpu_index(curcpu()));
}

/*
 * synchronize_rcu()
 *
 *	Wait for any pending RCU read sections on all CPUs to
 *	complete, by triggering activity on every CPU that cannot
 *	run until any RCU read section in progress there has ended.
 */
void
synchronize_rcu(void)
{

	SDT_PROBE0(sdt, linux, rcu, synchronize__start);
	xc_wait(xc_broadcast(0, &synchronize_rcu_xc, NULL, NULL));
	SDT_PROBE0(sdt, linux, rcu, synchronize__done);
}
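
/*
 * Example (illustrative sketch, not compiled): a writer typically
 * unpublishes an object and then calls synchronize_rcu() before
 * freeing it, so that no reader can still be using it.  The names
 * struct example, example_lock, and example_list are hypothetical.
 */
#if 0
struct example {
	struct example	*next;
	int		data;
};

static kmutex_t example_lock;		/* serializes writers */
static struct example *example_list;	/* published to readers */

static void
example_remove_first(void)
{
	struct example *e;

	mutex_enter(&example_lock);
	e = example_list;
	example_list = e->next;	/* unpublish e from new readers */
	mutex_exit(&example_lock);

	synchronize_rcu();	/* wait out all current readers */
	kfree(e);		/* nobody can be using e now */
}
#endif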

/*
 * rcu_barrier()
 *
 *	Wait for all pending RCU callbacks to complete.
 *
 *	Does not imply, and is not implied by, synchronize_rcu.
 */
void
rcu_barrier(void)
{
	uint64_t gen;

	SDT_PROBE0(sdt, linux, rcu, barrier__start);
	mutex_enter(&gc.lock);
	if (gc.first_callback != NULL || gc.first_kfree != NULL) {
		gen = gc.gen;
		do {
			cv_wait(&gc.cv, &gc.lock);
		} while (gc.gen == gen);
	}
	mutex_exit(&gc.lock);
	SDT_PROBE0(sdt, linux, rcu, barrier__done);
}
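
/*
 * Example (illustrative sketch, not compiled): before tearing down
 * state shared with RCU callbacks, stop queueing new callbacks and
 * then wait for the queued ones to run.  example_stop_users and
 * example_destroy_state are hypothetical functions.
 */
#if 0
static void
example_shutdown(void)
{

	example_stop_users();	/* no new call_rcu/kfree_rcu after this */
	rcu_barrier();		/* all previously queued callbacks ran */
	example_destroy_state();	/* safe: no callback can touch it now */
}
#endif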

/*
 * call_rcu(head, callback)
 *
 *	Arrange to call callback(head) after all pending RCU read
 *	sections on every CPU have completed.  Return immediately.
 */
void
call_rcu(struct rcu_head *head, void (*callback)(struct rcu_head *))
{

	head->rcuh_u.callback = callback;

	mutex_enter(&gc.lock);
	head->rcuh_next = gc.first_callback;
	gc.first_callback = head;
	cv_broadcast(&gc.cv);
	SDT_PROBE2(sdt, linux, rcu, call__queue,  head, callback);
	mutex_exit(&gc.lock);
}
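
/*
 * Example (illustrative sketch, not compiled): embed a struct
 * rcu_head in the object to be reclaimed and pass a callback that
 * recovers the object from the head and frees it.  struct example,
 * example_free_cb, and example_retire are hypothetical names.
 */
#if 0
struct example {
	int		data;
	struct rcu_head	rcu;
};

static void
example_free_cb(struct rcu_head *head)
{
	struct example *e = container_of(head, struct example, rcu);

	kfree(e);
}

static void
example_retire(struct example *e)
{

	/* e must already be unpublished from readers */
	call_rcu(&e->rcu, &example_free_cb);
}
#endif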

/*
 * _kfree_rcu(head, obj)
 *
 *	kfree_rcu helper: schedule kfree(obj) using head for storage.
 */
void
_kfree_rcu(struct rcu_head *head, void *obj)
{

	head->rcuh_u.obj = obj;

	mutex_enter(&gc.lock);
	head->rcuh_next = gc.first_kfree;
	gc.first_kfree = head;
	cv_broadcast(&gc.cv);
	SDT_PROBE2(sdt, linux, rcu, kfree__queue,  head, obj);
	mutex_exit(&gc.lock);
}
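
/*
 * Example (illustrative sketch, not compiled): callers normally use
 * the kfree_rcu() macro from <linux/rcupdate.h>, which passes the
 * object's embedded rcu_head and the object itself to this helper,
 * rather than calling _kfree_rcu directly.  struct example and
 * example_retire are hypothetical names.
 */
#if 0
struct example {
	int		data;
	struct rcu_head	rcu;
};

static void
example_retire(struct example *e)
{

	/* after unpublishing e, schedule it to be kfree'd */
	kfree_rcu(e, rcu);
}
#endif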

static void
gc_thread(void *cookie)
{
	struct rcu_head *head_callback, *head_kfree, *head, *next;

	mutex_enter(&gc.lock);
	for (;;) {
		/* Start with no work.  */
		bool work = false;

		/* Grab the list of callbacks.  */
		if ((head_callback = gc.first_callback) != NULL) {
			gc.first_callback = NULL;
			work = true;
		}

		/* Grab the list of objects to kfree.  */
		if ((head_kfree = gc.first_kfree) != NULL) {
			gc.first_kfree = NULL;
			work = true;
		}

		/*
		 * If there is no work: stop if we're dying,
		 * otherwise wait for work to arrive.
		 */
		if (!work) {
			if (gc.dying)
				break;
			cv_wait(&gc.cv, &gc.lock);
			continue;
		}

		/* We have work to do.  Drop the lock to do it.  */
		mutex_exit(&gc.lock);

		/* Wait for activity on all CPUs.  */
		synchronize_rcu();

		/* Call the callbacks.  */
		for (head = head_callback; head != NULL; head = next) {
			void (*callback)(struct rcu_head *) =
			    head->rcuh_u.callback;
			next = head->rcuh_next;
			SDT_PROBE2(sdt, linux, rcu, call__run,
			    head, callback);
			(*callback)(head);
			/*
			 * Can't dereference head or invoke
			 * callback after this point.
			 */
			SDT_PROBE2(sdt, linux, rcu, call__done,
			    head, callback);
		}

		/* Free the objects to kfree.  */
		for (head = head_kfree; head != NULL; head = next) {
			void *obj = head->rcuh_u.obj;
			next = head->rcuh_next;
			SDT_PROBE2(sdt, linux, rcu, kfree__free,  head, obj);
			kfree(obj);
			/* Can't dereference head or obj after this point.  */
			SDT_PROBE2(sdt, linux, rcu, kfree__done,  head, obj);
		}

		/* Reacquire the lock.  */
		mutex_enter(&gc.lock);

		/* Finished a batch of work.  Notify rcu_barrier.  */
		gc.gen++;
		cv_broadcast(&gc.cv);
	}
	KASSERT(gc.first_callback == NULL);
	KASSERT(gc.first_kfree == NULL);
	mutex_exit(&gc.lock);

	kthread_exit(0);
}

int
linux_rcu_gc_init(void)
{
	int error;

	mutex_init(&gc.lock, MUTEX_DEFAULT, IPL_VM);
	cv_init(&gc.cv, "lnxrcugc");
	gc.first_callback = NULL;
	gc.first_kfree = NULL;
	gc.gen = 0;
	gc.dying = false;

	error = kthread_create(PRI_NONE,
	    KTHREAD_MPSAFE|KTHREAD_TS|KTHREAD_MUSTJOIN, NULL, &gc_thread, NULL,
	    &gc.lwp, "lnxrcugc");
	if (error)
		goto fail;

	/* Success!  */
	return 0;

fail:	cv_destroy(&gc.cv);
	mutex_destroy(&gc.lock);
	return error;
}

void
linux_rcu_gc_fini(void)
{

	mutex_enter(&gc.lock);
	gc.dying = true;
	cv_broadcast(&gc.cv);
	mutex_exit(&gc.lock);

	kthread_join(gc.lwp);
	gc.lwp = NULL;
	KASSERT(gc.first_callback == NULL);
	KASSERT(gc.first_kfree == NULL);
	cv_destroy(&gc.cv);
	mutex_destroy(&gc.lock);
}
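
/*
 * Example (illustrative sketch, not compiled): the host module is
 * expected to pair these calls, starting the garbage collector
 * thread at attach time and joining it at detach time, once all
 * call_rcu/kfree_rcu users are gone.  example_module_attach and
 * example_module_detach are hypothetical names.
 */
#if 0
static int
example_module_attach(void)
{
	int error;

	error = linux_rcu_gc_init();
	if (error)
		return error;
	return 0;
}

static void
example_module_detach(void)
{

	/* all call_rcu/kfree_rcu users must be gone by now */
	linux_rcu_gc_fini();
}
#endif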