/*	$OpenBSD: kern_srp.c,v 1.13 2020/12/06 19:18:30 cheloha Exp $ */

/*
 * Copyright (c) 2014 Jonathan Matthew <jmatthew@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/timeout.h>
#include <sys/srp.h>
#include <sys/atomic.h>

void	srp_v_gc_start(struct srp_gc *, struct srp *, void *);

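/*
 * Set up the reference-counting callbacks for an SRP list.  The ref
 * callback is stored directly; the unref callback becomes the destructor
 * of the embedded garbage collector, so it only runs once no CPU still
 * holds a hazard reference to the element.
 */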
void
srpl_rc_init(struct srpl_rc *rc, void (*ref)(void *, void *),
    void (*unref)(void *, void *), void *cookie)
{
	rc->srpl_ref = ref;
	srp_gc_init(&rc->srpl_gc, unref, cookie);
}

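/*
 * Set up a garbage collector: record the destructor and its cookie and
 * initialise the refcount that srp_gc_finalize() will later wait on.
 */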
void
srp_gc_init(struct srp_gc *srp_gc, void (*dtor)(void *, void *), void *cookie)
{
	srp_gc->srp_gc_dtor = dtor;
	srp_gc->srp_gc_cookie = cookie;
	refcnt_init(&srp_gc->srp_gc_refcnt);
}

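/* A fresh srp holds no value. */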
void
srp_init(struct srp *srp)
{
	srp->ref = NULL;
}

void *
srp_swap_locked(struct srp *srp, void *nv)
{
	void *ov;

	/*
	 * this doesn't have to be as careful as the caller has already
	 * prevented concurrent updates, eg. by holding the kernel lock.
	 * can't be mixed with non-locked updates though.
	 */

	ov = srp->ref;
	srp->ref = nv;

	return (ov);
}

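/*
 * Publish a new value under the caller's serialisation.  A gc reference
 * is taken on behalf of the new value, and the old value (if any) is
 * handed to the garbage collector.
 */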
void
srp_update_locked(struct srp_gc *srp_gc, struct srp *srp, void *v)
{
	if (v != NULL)
		refcnt_take(&srp_gc->srp_gc_refcnt);

	v = srp_swap_locked(srp, v);

	if (v != NULL)
		srp_v_gc_start(srp_gc, srp, v);
}

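/* Read the current value; the caller serialises against updates. */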
void *
srp_get_locked(struct srp *srp)
{
	return (srp->ref);
}

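/*
 * Sleep until every reference taken by srp_update*() has been released,
 * i.e. until the garbage collector has destroyed all old values.
 */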
void
srp_gc_finalize(struct srp_gc *srp_gc)
{
	refcnt_finalize(&srp_gc->srp_gc_refcnt, "srpfini");
}

#ifdef MULTIPROCESSOR
#include <machine/cpu.h>
#include <sys/pool.h>

struct srp_gc_ctx {
	struct srp_gc		*srp_gc;
	struct timeout		tick;
	struct srp_hazard	hzrd;
};

int	srp_v_referenced(struct srp *, void *);
void	srp_v_gc(void *);

struct pool srp_gc_ctx_pool;

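/* Create the pool that deferred gc contexts are allocated from. */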
void
srp_startup(void)
{
	pool_init(&srp_gc_ctx_pool, sizeof(struct srp_gc_ctx), 0,
	    IPL_SOFTCLOCK, PR_WAITOK, "srpgc", NULL);
}

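/*
 * Walk every CPU's hazard slots and report whether any of them still
 * holds a reference to v taken through srp.
 */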
int
srp_v_referenced(struct srp *srp, void *v)
{
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;
	u_int i;
	struct srp_hazard *hzrd;

	CPU_INFO_FOREACH(cii, ci) {
		for (i = 0; i < nitems(ci->ci_srp_hazards); i++) {
			hzrd = &ci->ci_srp_hazards[i];

			if (hzrd->sh_p != srp)
				continue;
			membar_consumer();
			if (hzrd->sh_v != v)
				continue;

			return (1);
		}
	}

	return (0);
}

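/*
 * Destroy an old value and drop the gc reference that srp_update*()
 * took for it, waking anyone waiting in srp_gc_finalize().
 */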
void
srp_v_dtor(struct srp_gc *srp_gc, void *v)
{
	(*srp_gc->srp_gc_dtor)(srp_gc->srp_gc_cookie, v);

	refcnt_rele_wake(&srp_gc->srp_gc_refcnt);
}

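/*
 * Retire an old value.  If no CPU holds a hazard reference it is
 * destroyed immediately; otherwise a context is allocated and the check
 * is retried from a timeout until the value is free.
 */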
void
srp_v_gc_start(struct srp_gc *srp_gc, struct srp *srp, void *v)
{
	struct srp_gc_ctx *ctx;

	if (!srp_v_referenced(srp, v)) {
		/* we win */
		srp_v_dtor(srp_gc, v);
		return;
	}

	/* in use, try later */

	ctx = pool_get(&srp_gc_ctx_pool, PR_WAITOK);
	ctx->srp_gc = srp_gc;
	ctx->hzrd.sh_p = srp;
	ctx->hzrd.sh_v = v;

	timeout_set(&ctx->tick, srp_v_gc, ctx);
	timeout_add(&ctx->tick, 1);
}

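/*
 * Timeout handler for deferred destruction: keep rescheduling until the
 * value is unreferenced, then destroy it and free the context.
 */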
void
srp_v_gc(void *x)
{
	struct srp_gc_ctx *ctx = x;

	if (srp_v_referenced(ctx->hzrd.sh_p, ctx->hzrd.sh_v)) {
		/* oh well, try again later */
		timeout_add(&ctx->tick, 1);
		return;
	}

	srp_v_dtor(ctx->srp_gc, ctx->hzrd.sh_v);
	pool_put(&srp_gc_ctx_pool, ctx);
}

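/* Atomically exchange the stored value with v. */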
void *
srp_swap(struct srp *srp, void *v)
{
	return (atomic_swap_ptr(&srp->ref, v));
}

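/*
 * MP-safe counterpart of srp_update_locked(): atomically swap in the new
 * value and hand the previous one to the garbage collector.
 */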
void
srp_update(struct srp_gc *srp_gc, struct srp *srp, void *v)
{
	if (v != NULL)
		refcnt_take(&srp_gc->srp_gc_refcnt);

	v = srp_swap(srp, v);
	if (v != NULL)
		srp_v_gc_start(srp_gc, srp, v);
}

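/*
 * Publish a hazard pointer for srp and return the value it referenced at
 * that point.
 */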
static inline void *
srp_v(struct srp_hazard *hzrd, struct srp *srp)
{
	void *v;

	hzrd->sh_p = srp;

	/*
	 * ensure we update this cpu's hazard pointer to a value that's still
	 * current after the store finishes, otherwise the gc task may already
	 * be destroying it
	 */
	do {
		v = srp->ref;
		hzrd->sh_v = v;
		membar_consumer();
	} while (__predict_false(v != srp->ref));

	return (v);
}

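/*
 * Take a reference: claim a free hazard slot on the current CPU and
 * record it in sr so srp_leave() can release it.  Running out of slots
 * is a bug, hence the panic.
 */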
void *
srp_enter(struct srp_ref *sr, struct srp *srp)
{
	struct cpu_info *ci = curcpu();
	struct srp_hazard *hzrd;
	u_int i;

	for (i = 0; i < nitems(ci->ci_srp_hazards); i++) {
		hzrd = &ci->ci_srp_hazards[i];
		if (hzrd->sh_p == NULL) {
			sr->hz = hzrd;
			return (srp_v(hzrd, srp));
		}
	}

	panic("%s: not enough srp hazard records", __func__);

	/* NOTREACHED */
	return (NULL);
}

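/*
 * Move an existing reference to another srp (e.g. the next element of a
 * list), reusing the hazard slot already held in sr.
 */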
void *
srp_follow(struct srp_ref *sr, struct srp *srp)
{
	return (srp_v(sr->hz, srp));
}

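/* Release the reference by freeing the hazard slot. */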
void
srp_leave(struct srp_ref *sr)
{
	sr->hz->sh_p = NULL;
}

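/*
 * Like srp_v_referenced(), but match on the value alone, regardless of
 * which srp the reference was taken through.
 */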
static inline int
srp_referenced(void *v)
{
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;
	u_int i;
	struct srp_hazard *hzrd;

	CPU_INFO_FOREACH(cii, ci) {
		for (i = 0; i < nitems(ci->ci_srp_hazards); i++) {
			hzrd = &ci->ci_srp_hazards[i];

			if (hzrd->sh_p != NULL && hzrd->sh_v == v)
				return (1);
		}
	}

	return (0);
}

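/*
 * Sleep, a millisecond at a time, until no CPU holds a hazard reference
 * to v.
 */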
void
srp_finalize(void *v, const char *wmesg)
{
	while (srp_referenced(v))
		tsleep_nsec(v, PWAIT, wmesg, MSEC_TO_NSEC(1));
}

#else /* MULTIPROCESSOR */

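/*
 * On uniprocessor kernels nothing can hold a reference concurrently, so
 * old values are destroyed immediately and no pool or timeouts are
 * needed.
 */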
void
srp_startup(void)
{

}

void
srp_v_gc_start(struct srp_gc *srp_gc, struct srp *srp, void *v)
{
	(*srp_gc->srp_gc_dtor)(srp_gc->srp_gc_cookie, v);
	refcnt_rele_wake(&srp_gc->srp_gc_refcnt);
}

#endif /* MULTIPROCESSOR */