/*	$OpenBSD: kern_srp.c,v 1.1 2015/07/02 01:34:00 dlg Exp $ */

/*
 * Copyright (c) 2014 Jonathan Matthew <jmatthew@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/atomic.h>

#include <sys/srp.h>

void	srp_v_gc_start(struct srp_gc *, struct srp *, void *);

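/*
 * Initialise a garbage collection context.  The dtor/cookie pair is
 * called for values that are no longer referenced by any cpu.  The
 * refcount starts at 1; that initial reference is released by
 * srp_finalize().
 */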
void
srp_gc_init(struct srp_gc *srp_gc, void (*dtor)(void *, void *), void *cookie)
{
	srp_gc->srp_gc_dtor = dtor;
	srp_gc->srp_gc_cookie = cookie;
	srp_gc->srp_gc_refcount = 1;
}

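/*
 * An srp starts out empty.
 */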
void
srp_init(struct srp *srp)
{
	srp->ref = NULL;
}

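/*
 * Replace the value in an srp when the caller already serialises
 * updates (e.g. by holding the kernel lock).  The old value, if any,
 * is handed to the gc machinery for destruction once no cpu
 * references it.
 */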
void
srp_update_locked(struct srp_gc *srp_gc, struct srp *srp, void *nv)
{
	void *ov;

	if (nv != NULL)
		atomic_inc_int(&srp_gc->srp_gc_refcount);

	/*
	 * this doesn't have to be as careful as srp_update() since the
	 * caller has already prevented concurrent updates, e.g. by
	 * holding the kernel lock.  it can't be mixed with non-locked
	 * updates though.
	 */

	ov = srp->ref;
	srp->ref = nv;
	if (ov != NULL)
		srp_v_gc_start(srp_gc, srp, ov);
}

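/*
 * Read the current value while holding the same lock that serialises
 * updates; no hazard pointer is needed in that case.
 */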
void *
srp_get_locked(struct srp *srp)
{
	return (srp->ref);
}

#ifdef MULTIPROCESSOR
#include <machine/cpu.h>
#include <sys/pool.h>

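/*
 * State carried between gc retries: the gc context to credit, the
 * timeout that drives the retries, and the srp/value pair being
 * waited on (reusing struct srp_hazard to hold the pair).
 */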
struct srp_gc_ctx {
	struct srp_gc		*srp_gc;
	struct timeout		tick;
	struct srp_hazard	hzrd;
};

int	srp_v_referenced(struct srp *, void *);
void	srp_v_gc(void *);

struct pool srp_gc_ctx_pool;

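/*
 * Set up the pool that backs deferred gc contexts.
 */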
void
srp_startup(void)
{
	pool_init(&srp_gc_ctx_pool, sizeof(struct srp_gc_ctx), 0, 0,
	    PR_WAITOK, "srpgc", NULL);

	/* items are allocated in process context, but freed from a timeout */
	pool_setipl(&srp_gc_ctx_pool, IPL_SOFTCLOCK);
}

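/*
 * Returns non-zero if any cpu currently holds a hazard reference to
 * value v through srp.
 */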
int
srp_v_referenced(struct srp *srp, void *v)
{
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;
	u_int i;
	struct srp_hazard *hzrd;

	CPU_INFO_FOREACH(cii, ci) {
		for (i = 0; i < nitems(ci->ci_srp_hazards); i++) {
			hzrd = &ci->ci_srp_hazards[i];

			if (hzrd->sh_p != srp)
				continue;
			membar_consumer();
			if (hzrd->sh_v != v)
				continue;

			return (1);
		}
	}

	return (0);
}

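/*
 * Destroy a value that is no longer referenced and drop the gc
 * refcount taken for it, waking srp_finalize() when the count
 * reaches zero.
 */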
void
srp_v_dtor(struct srp_gc *srp_gc, void *v)
{
	(*srp_gc->srp_gc_dtor)(srp_gc->srp_gc_cookie, v);

	if (atomic_dec_int_nv(&srp_gc->srp_gc_refcount) == 0)
		wakeup_one(&srp_gc->srp_gc_refcount);
}

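/*
 * Called with the value that was just replaced in an srp.  If no cpu
 * still references it, destroy it immediately; otherwise schedule a
 * timeout to retry.
 */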
void
srp_v_gc_start(struct srp_gc *srp_gc, struct srp *srp, void *v)
{
	struct srp_gc_ctx *ctx;

	if (!srp_v_referenced(srp, v)) {
		/* we win */
		srp_v_dtor(srp_gc, v);
		return;
	}

	/* in use, try later */

	ctx = pool_get(&srp_gc_ctx_pool, PR_WAITOK);
	ctx->srp_gc = srp_gc;
	ctx->hzrd.sh_p = srp;
	ctx->hzrd.sh_v = v;

	timeout_set(&ctx->tick, srp_v_gc, ctx);
	timeout_add(&ctx->tick, 1);
}

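/*
 * Timeout handler for deferred destruction: keep rescheduling until
 * the value is no longer referenced, then destroy it and free the
 * context.
 */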
void
srp_v_gc(void *x)
{
	struct srp_gc_ctx *ctx = x;

	if (srp_v_referenced(ctx->hzrd.sh_p, ctx->hzrd.sh_v)) {
		/* oh well, try again later */
		timeout_add(&ctx->tick, 1);
		return;
	}

	srp_v_dtor(ctx->srp_gc, ctx->hzrd.sh_v);
	pool_put(&srp_gc_ctx_pool, ctx);
}

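/*
 * Lock-free update: take a gc reference for the new value, swap it
 * in atomically, and hand the old value to the gc machinery.
 */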
void
srp_update(struct srp_gc *srp_gc, struct srp *srp, void *v)
{
	if (v != NULL)
		atomic_inc_int(&srp_gc->srp_gc_refcount);

	v = atomic_swap_ptr(&srp->ref, v);
	if (v != NULL)
		srp_v_gc_start(srp_gc, srp, v);
}

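/*
 * Drop the initial gc reference and sleep until every value handed
 * to the gc has been destroyed.  After this returns the dtor will
 * not be called again for this context.
 */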
void
srp_finalize(struct srp_gc *srp_gc)
{
	struct sleep_state sls;
	u_int r;

	r = atomic_dec_int_nv(&srp_gc->srp_gc_refcount);
	while (r > 0) {
		sleep_setup(&sls, &srp_gc->srp_gc_refcount, PWAIT, "srpfini");
		r = srp_gc->srp_gc_refcount;
		sleep_finish(&sls, r);
	}
}

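/*
 * srp_enter() publishes a per-cpu hazard pointer for the current
 * value of the srp and returns that value.  A sketch of a
 * hypothetical read side (foo_srp and use() are illustrative names,
 * not part of this file):
 *
 *	struct foo *foo;
 *
 *	foo = srp_enter(&foo_srp);
 *	if (foo != NULL)
 *		use(foo);
 *	srp_leave(&foo_srp, foo);
 *
 * srp_leave() must be called even if the srp turned out to be empty,
 * to release the hazard slot claimed here.
 */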
void *
srp_enter(struct srp *srp)
{
	struct cpu_info *ci = curcpu();
	struct srp_hazard *hzrd;
	void *v;
	u_int i;

	for (i = 0; i < nitems(ci->ci_srp_hazards); i++) {
		hzrd = &ci->ci_srp_hazards[i];
		if (hzrd->sh_p == NULL)
			break;
	}
	if (__predict_false(i == nitems(ci->ci_srp_hazards)))
		panic("%s: not enough srp hazard records", __func__);

	hzrd->sh_p = srp;
	membar_producer();

	/*
	 * ensure we update this cpu's hazard pointer to a value that's still
	 * current after the store finishes, otherwise the gc task may already
	 * be destroying it
	 */
	do {
		v = srp->ref;
		hzrd->sh_v = v;
		membar_consumer();
	} while (__predict_false(v != srp->ref));

	return (v);
}

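/*
 * Release the hazard reference taken by srp_enter() by clearing this
 * cpu's hazard slot for the srp.
 */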
void
srp_leave(struct srp *srp, void *v)
{
	struct cpu_info *ci = curcpu();
	struct srp_hazard *hzrd;
	u_int i;

	for (i = 0; i < nitems(ci->ci_srp_hazards); i++) {
		hzrd = &ci->ci_srp_hazards[i];
		if (hzrd->sh_p == srp) {
			hzrd->sh_p = NULL;
			hzrd->sh_v = NULL;
			return;
		}
	}

	panic("%s: unexpected ref %p via %p", __func__, v, srp);
}

#else /* MULTIPROCESSOR */

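/*
 * On uniprocessor kernels there are no other cpus to hold hazard
 * references, so no gc pool is needed.
 */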
void
srp_startup(void)
{

}

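/*
 * With no deferred destruction, finalising only has to drop the
 * initial reference.
 */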
void
srp_finalize(struct srp_gc *srp_gc)
{
	KASSERT(srp_gc->srp_gc_refcount == 1);

	srp_gc->srp_gc_refcount--;
}

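/*
 * Replaced values can be destroyed immediately on uniprocessor
 * kernels.
 */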
void
srp_v_gc_start(struct srp_gc *srp_gc, struct srp *srp, void *v)
{
	(*srp_gc->srp_gc_dtor)(srp_gc->srp_gc_cookie, v);
	srp_gc->srp_gc_refcount--;
}

#endif /* MULTIPROCESSOR */