1 /* $OpenBSD: kern_srp.c,v 1.11 2016/09/15 02:00:16 dlg Exp $ */ 2 3 /* 4 * Copyright (c) 2014 Jonathan Matthew <jmatthew@openbsd.org> 5 * 6 * Permission to use, copy, modify, and distribute this software for any 7 * purpose with or without fee is hereby granted, provided that the above 8 * copyright notice and this permission notice appear in all copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 #include <sys/param.h> 20 #include <sys/types.h> 21 #include <sys/systm.h> 22 #include <sys/timeout.h> 23 #include <sys/srp.h> 24 #include <sys/atomic.h> 25 26 void srp_v_gc_start(struct srp_gc *, struct srp *, void *); 27 28 void 29 srpl_rc_init(struct srpl_rc *rc, void (*ref)(void *, void *), 30 void (*unref)(void *, void *), void *cookie) 31 { 32 rc->srpl_ref = ref; 33 srp_gc_init(&rc->srpl_gc, unref, cookie); 34 } 35 36 void 37 srp_gc_init(struct srp_gc *srp_gc, void (*dtor)(void *, void *), void *cookie) 38 { 39 srp_gc->srp_gc_dtor = dtor; 40 srp_gc->srp_gc_cookie = cookie; 41 refcnt_init(&srp_gc->srp_gc_refcnt); 42 } 43 44 void 45 srp_init(struct srp *srp) 46 { 47 srp->ref = NULL; 48 } 49 50 void * 51 srp_swap_locked(struct srp *srp, void *nv) 52 { 53 void *ov; 54 55 /* 56 * this doesn't have to be as careful as the caller has already 57 * prevented concurrent updates, eg. by holding the kernel lock. 58 * can't be mixed with non-locked updates though. 59 */ 60 61 ov = srp->ref; 62 srp->ref = nv; 63 64 return (ov); 65 } 66 67 void 68 srp_update_locked(struct srp_gc *srp_gc, struct srp *srp, void *v) 69 { 70 if (v != NULL) 71 refcnt_take(&srp_gc->srp_gc_refcnt); 72 73 v = srp_swap_locked(srp, v); 74 75 if (v != NULL) 76 srp_v_gc_start(srp_gc, srp, v); 77 } 78 79 void * 80 srp_get_locked(struct srp *srp) 81 { 82 return (srp->ref); 83 } 84 85 void 86 srp_gc_finalize(struct srp_gc *srp_gc) 87 { 88 refcnt_finalize(&srp_gc->srp_gc_refcnt, "srpfini"); 89 } 90 91 #ifdef MULTIPROCESSOR 92 #include <machine/cpu.h> 93 #include <sys/pool.h> 94 95 struct srp_gc_ctx { 96 struct srp_gc *srp_gc; 97 struct timeout tick; 98 struct srp_hazard hzrd; 99 }; 100 101 int srp_v_referenced(struct srp *, void *); 102 void srp_v_gc(void *); 103 104 struct pool srp_gc_ctx_pool; 105 106 void 107 srp_startup(void) 108 { 109 pool_init(&srp_gc_ctx_pool, sizeof(struct srp_gc_ctx), 0, 110 IPL_SOFTCLOCK, PR_WAITOK, "srpgc", NULL); 111 } 112 113 int 114 srp_v_referenced(struct srp *srp, void *v) 115 { 116 struct cpu_info *ci; 117 CPU_INFO_ITERATOR cii; 118 u_int i; 119 struct srp_hazard *hzrd; 120 121 CPU_INFO_FOREACH(cii, ci) { 122 for (i = 0; i < nitems(ci->ci_srp_hazards); i++) { 123 hzrd = &ci->ci_srp_hazards[i]; 124 125 if (hzrd->sh_p != srp) 126 continue; 127 membar_consumer(); 128 if (hzrd->sh_v != v) 129 continue; 130 131 return (1); 132 } 133 } 134 135 return (0); 136 } 137 138 void 139 srp_v_dtor(struct srp_gc *srp_gc, void *v) 140 { 141 (*srp_gc->srp_gc_dtor)(srp_gc->srp_gc_cookie, v); 142 143 refcnt_rele_wake(&srp_gc->srp_gc_refcnt); 144 } 145 146 void 147 srp_v_gc_start(struct srp_gc *srp_gc, struct srp *srp, void *v) 148 { 149 struct srp_gc_ctx *ctx; 150 151 if (!srp_v_referenced(srp, v)) { 152 /* we win */ 153 srp_v_dtor(srp_gc, v); 154 return; 155 } 156 157 /* in use, try later */ 158 159 ctx = pool_get(&srp_gc_ctx_pool, PR_WAITOK); 160 ctx->srp_gc = srp_gc; 161 ctx->hzrd.sh_p = srp; 162 ctx->hzrd.sh_v = v; 163 164 timeout_set(&ctx->tick, srp_v_gc, ctx); 165 timeout_add(&ctx->tick, 1); 166 } 167 168 void 169 srp_v_gc(void *x) 170 { 171 struct srp_gc_ctx *ctx = x; 172 173 if (srp_v_referenced(ctx->hzrd.sh_p, ctx->hzrd.sh_v)) { 174 /* oh well, try again later */ 175 timeout_add(&ctx->tick, 1); 176 return; 177 } 178 179 srp_v_dtor(ctx->srp_gc, ctx->hzrd.sh_v); 180 pool_put(&srp_gc_ctx_pool, ctx); 181 } 182 183 void * 184 srp_swap(struct srp *srp, void *v) 185 { 186 return (atomic_swap_ptr(&srp->ref, v)); 187 } 188 189 void 190 srp_update(struct srp_gc *srp_gc, struct srp *srp, void *v) 191 { 192 if (v != NULL) 193 refcnt_take(&srp_gc->srp_gc_refcnt); 194 195 v = srp_swap(srp, v); 196 if (v != NULL) 197 srp_v_gc_start(srp_gc, srp, v); 198 } 199 200 static inline void * 201 srp_v(struct srp_hazard *hzrd, struct srp *srp) 202 { 203 void *v; 204 205 hzrd->sh_p = srp; 206 207 /* 208 * ensure we update this cpu's hazard pointer to a value that's still 209 * current after the store finishes, otherwise the gc task may already 210 * be destroying it 211 */ 212 do { 213 v = srp->ref; 214 hzrd->sh_v = v; 215 membar_consumer(); 216 } while (__predict_false(v != srp->ref)); 217 218 return (v); 219 } 220 221 void * 222 srp_enter(struct srp_ref *sr, struct srp *srp) 223 { 224 struct cpu_info *ci = curcpu(); 225 struct srp_hazard *hzrd; 226 u_int i; 227 228 for (i = 0; i < nitems(ci->ci_srp_hazards); i++) { 229 hzrd = &ci->ci_srp_hazards[i]; 230 if (hzrd->sh_p == NULL) { 231 sr->hz = hzrd; 232 return (srp_v(hzrd, srp)); 233 } 234 } 235 236 panic("%s: not enough srp hazard records", __func__); 237 238 /* NOTREACHED */ 239 return (NULL); 240 } 241 242 void * 243 srp_follow(struct srp_ref *sr, struct srp *srp) 244 { 245 return (srp_v(sr->hz, srp)); 246 } 247 248 void 249 srp_leave(struct srp_ref *sr) 250 { 251 sr->hz->sh_p = NULL; 252 } 253 254 static inline int 255 srp_referenced(void *v) 256 { 257 struct cpu_info *ci; 258 CPU_INFO_ITERATOR cii; 259 u_int i; 260 struct srp_hazard *hzrd; 261 262 CPU_INFO_FOREACH(cii, ci) { 263 for (i = 0; i < nitems(ci->ci_srp_hazards); i++) { 264 hzrd = &ci->ci_srp_hazards[i]; 265 266 if (hzrd->sh_p != NULL && hzrd->sh_v == v) 267 return (1); 268 } 269 } 270 271 return (0); 272 } 273 274 void 275 srp_finalize(void *v, const char *wmesg) 276 { 277 while (srp_referenced(v)) 278 tsleep(v, PWAIT, wmesg, 1); 279 } 280 281 #else /* MULTIPROCESSOR */ 282 283 void 284 srp_startup(void) 285 { 286 287 } 288 289 void 290 srp_v_gc_start(struct srp_gc *srp_gc, struct srp *srp, void *v) 291 { 292 (*srp_gc->srp_gc_dtor)(srp_gc->srp_gc_cookie, v); 293 refcnt_rele_wake(&srp_gc->srp_gc_refcnt); 294 } 295 296 #endif /* MULTIPROCESSOR */ 297