xref: /openbsd-src/sys/kern/kern_srp.c (revision fb8aa7497fded39583f40e800732f9c046411717)
1 /*	$OpenBSD: kern_srp.c,v 1.10 2016/06/01 03:34:32 dlg Exp $ */
2 
3 /*
4  * Copyright (c) 2014 Jonathan Matthew <jmatthew@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <sys/param.h>
20 #include <sys/types.h>
21 #include <sys/systm.h>
22 #include <sys/timeout.h>
23 #include <sys/srp.h>
24 #include <sys/atomic.h>
25 
26 void	srp_v_gc_start(struct srp_gc *, struct srp *, void *);
27 
28 void
29 srpl_rc_init(struct srpl_rc *rc,  void (*ref)(void *, void *),
30     void (*unref)(void *, void *), void *cookie)
31 {
32 	rc->srpl_ref = ref;
33 	srp_gc_init(&rc->srpl_gc, unref, cookie);
34 }
35 
36 void
37 srp_gc_init(struct srp_gc *srp_gc, void (*dtor)(void *, void *), void *cookie)
38 {
39 	srp_gc->srp_gc_dtor = dtor;
40 	srp_gc->srp_gc_cookie = cookie;
41 	refcnt_init(&srp_gc->srp_gc_refcnt);
42 }
43 
44 void
45 srp_init(struct srp *srp)
46 {
47 	srp->ref = NULL;
48 }
49 
50 void *
51 srp_swap_locked(struct srp *srp, void *nv)
52 {
53 	void *ov;
54 
55 	/*
56 	 * this doesn't have to be as careful as the caller has already
57 	 * prevented concurrent updates, eg. by holding the kernel lock.
58 	 * can't be mixed with non-locked updates though.
59 	 */
60 
61 	ov = srp->ref;
62 	srp->ref = nv;
63 
64 	return (ov);
65 }
66 
67 void
68 srp_update_locked(struct srp_gc *srp_gc, struct srp *srp, void *v)
69 {
70 	if (v != NULL)
71 		refcnt_take(&srp_gc->srp_gc_refcnt);
72 
73 	v = srp_swap_locked(srp, v);
74 
75 	if (v != NULL)
76 		srp_v_gc_start(srp_gc, srp, v);
77 }
78 
79 void *
80 srp_get_locked(struct srp *srp)
81 {
82 	return (srp->ref);
83 }
84 
85 void
86 srp_gc_finalize(struct srp_gc *srp_gc)
87 {
88 	refcnt_finalize(&srp_gc->srp_gc_refcnt, "srpfini");
89 }
90 
91 #ifdef MULTIPROCESSOR
92 #include <machine/cpu.h>
93 #include <sys/pool.h>
94 
/*
 * State carried across gc retries: while a removed value is still
 * referenced by some cpu, srp_v_gc_start() parks it here and
 * srp_v_gc() polls it from a timeout until it can be destroyed.
 */
struct srp_gc_ctx {
	struct srp_gc		*srp_gc;	/* gc owning the dtor/refcnt */
	struct timeout		tick;		/* retry timer for srp_v_gc() */
	struct srp_hazard	hzrd;		/* (srp, value) awaiting dtor */
};
100 
101 int	srp_v_referenced(struct srp *, void *);
102 void	srp_v_gc(void *);
103 
104 struct pool srp_gc_ctx_pool;
105 
/*
 * One-time setup of the pool backing srp_gc_ctx allocations.
 */
void
srp_startup(void)
{
	pool_init(&srp_gc_ctx_pool, sizeof(struct srp_gc_ctx), 0, 0,
	    PR_WAITOK, "srpgc", NULL);

	/* items are allocated in a process, but freed from a timeout */
	pool_setipl(&srp_gc_ctx_pool, IPL_SOFTCLOCK);
}
115 
/*
 * Return 1 if any cpu's hazard records currently protect the value v
 * under srp, 0 otherwise.  sh_p is checked before sh_v, with
 * membar_consumer() keeping the two loads in that order against the
 * publish sequence in srp_v().
 */
int
srp_v_referenced(struct srp *srp, void *v)
{
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;
	u_int i;
	struct srp_hazard *hzrd;

	CPU_INFO_FOREACH(cii, ci) {
		for (i = 0; i < nitems(ci->ci_srp_hazards); i++) {
			hzrd = &ci->ci_srp_hazards[i];

			if (hzrd->sh_p != srp)
				continue;
			/* order the sh_v load after the sh_p load */
			membar_consumer();
			if (hzrd->sh_v != v)
				continue;

			return (1);
		}
	}

	return (0);
}
140 
/*
 * Destroy v: run the gc's dtor on it, then drop the reference that
 * srp_update()/srp_update_locked() took when v was installed.  The
 * dtor must complete before the refcount release wakes any finalizer.
 */
void
srp_v_dtor(struct srp_gc *srp_gc, void *v)
{
	(*srp_gc->srp_gc_dtor)(srp_gc->srp_gc_cookie, v);

	refcnt_rele_wake(&srp_gc->srp_gc_refcnt);
}
148 
/*
 * Start garbage collection of v, which has just been unlinked from
 * srp.  If no cpu holds a hazard reference the dtor runs immediately;
 * otherwise the (srp, v) pair is stashed in a context and retried
 * from the srp_v_gc() timeout.
 */
void
srp_v_gc_start(struct srp_gc *srp_gc, struct srp *srp, void *v)
{
	struct srp_gc_ctx *ctx;

	if (!srp_v_referenced(srp, v)) {
		/* we win */
		srp_v_dtor(srp_gc, v);
		return;
	}

	/* in use, try later */

	ctx = pool_get(&srp_gc_ctx_pool, PR_WAITOK);
	ctx->srp_gc = srp_gc;
	ctx->hzrd.sh_p = srp;
	ctx->hzrd.sh_v = v;

	timeout_set(&ctx->tick, srp_v_gc, ctx);
	timeout_add(&ctx->tick, 1);
}
170 
/*
 * Timeout handler for deferred destruction.  Reschedules itself while
 * any cpu still references the parked value, then runs the dtor and
 * returns the context to the pool.
 */
void
srp_v_gc(void *x)
{
	struct srp_gc_ctx *ctx = x;

	if (srp_v_referenced(ctx->hzrd.sh_p, ctx->hzrd.sh_v)) {
		/* oh well, try again later */
		timeout_add(&ctx->tick, 1);
		return;
	}

	srp_v_dtor(ctx->srp_gc, ctx->hzrd.sh_v);
	pool_put(&srp_gc_ctx_pool, ctx);
}
185 
186 void *
187 srp_swap(struct srp *srp, void *v)
188 {
189 	return (atomic_swap_ptr(&srp->ref, v));
190 }
191 
192 void
193 srp_update(struct srp_gc *srp_gc, struct srp *srp, void *v)
194 {
195 	if (v != NULL)
196 		refcnt_take(&srp_gc->srp_gc_refcnt);
197 
198 	v = srp_swap(srp, v);
199 	if (v != NULL)
200 		srp_v_gc_start(srp_gc, srp, v);
201 }
202 
/*
 * Publish a hazard reference for srp in hzrd (a slot owned by the
 * current cpu) and return the value it protects.  The re-read loop
 * below is the heart of the hazard-pointer scheme.
 */
static inline void *
srp_v(struct srp_hazard *hzrd, struct srp *srp)
{
	void *v;

	hzrd->sh_p = srp;

	/*
	 * ensure we update this cpu's hazard pointer to a value that's still
	 * current after the store finishes, otherwise the gc task may already
	 * be destroying it
	 */
	do {
		v = srp->ref;
		hzrd->sh_v = v;
		/* order the re-read of srp->ref after the sh_v store */
		membar_consumer();
	} while (__predict_false(v != srp->ref));

	return (v);
}
223 
/*
 * Take a reference to the value currently in srp.  Claims a free
 * hazard record on this cpu, remembers it in sr for srp_leave(), and
 * publishes it via srp_v().  Panics if every slot on the cpu is in
 * use, ie. too many srp references are held concurrently.
 */
void *
srp_enter(struct srp_ref *sr, struct srp *srp)
{
	struct cpu_info *ci = curcpu();
	struct srp_hazard *hzrd;
	u_int i;

	/* scan this cpu's hazard records for a free slot */
	for (i = 0; i < nitems(ci->ci_srp_hazards); i++) {
		hzrd = &ci->ci_srp_hazards[i];
		if (hzrd->sh_p == NULL) {
			sr->hz = hzrd;
			return (srp_v(hzrd, srp));
		}
	}

	panic("%s: not enough srp hazard records", __func__);

	/* NOTREACHED */
	return (NULL);
}
244 
/*
 * Swing an existing reference (taken with srp_enter()) over to another
 * srp, reusing the same hazard record.  Returns the new srp's value.
 */
void *
srp_follow(struct srp_ref *sr, struct srp *srp)
{
	return (srp_v(sr->hz, srp));
}
250 
251 void
252 srp_leave(struct srp_ref *sr)
253 {
254 	sr->hz->sh_p = NULL;
255 }
256 
257 static inline int
258 srp_referenced(void *v)
259 {
260 	struct cpu_info *ci;
261 	CPU_INFO_ITERATOR cii;
262 	u_int i;
263 	struct srp_hazard *hzrd;
264 
265 	CPU_INFO_FOREACH(cii, ci) {
266 		for (i = 0; i < nitems(ci->ci_srp_hazards); i++) {
267 			hzrd = &ci->ci_srp_hazards[i];
268 
269 			if (hzrd->sh_p != NULL && hzrd->sh_v == v)
270 				return (1);
271 		}
272 	}
273 
274 	return (0);
275 }
276 
277 void
278 srp_finalize(void *v, const char *wmesg)
279 {
280 	while (srp_referenced(v))
281 		tsleep(v, PWAIT, wmesg, 1);
282 }
283 
284 #else /* MULTIPROCESSOR */
285 
/*
 * Nothing to set up on uniprocessor kernels: values are never
 * deferred, so no gc context pool is needed.
 */
void
srp_startup(void)
{

}
291 
/*
 * Uniprocessor version: with no other cpus there can be no concurrent
 * readers, so the displaced value is destroyed immediately and the gc
 * reference taken at install time is dropped.
 */
void
srp_v_gc_start(struct srp_gc *srp_gc, struct srp *srp, void *v)
{
	(*srp_gc->srp_gc_dtor)(srp_gc->srp_gc_cookie, v);
	refcnt_rele_wake(&srp_gc->srp_gc_refcnt);
}
298 
299 #endif /* MULTIPROCESSOR */
300