/*	$OpenBSD: srp.h,v 1.15 2020/05/09 10:18:27 jca Exp $ */

/*
 * Copyright (c) 2014 Jonathan Matthew <jmatthew@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _SYS_SRP_H_
#define _SYS_SRP_H_

#include <sys/refcnt.h>

#ifndef __upunused
#ifdef MULTIPROCESSOR
#define __upunused
#else
#define __upunused __attribute__((__unused__))
#endif
#endif /* __upunused */

struct srp {
	void			*ref;
};

#define SRP_INITIALIZER() { NULL }

struct srp_hazard {
	struct srp		*sh_p;
	void			*sh_v;
};

struct srp_ref {
	struct srp_hazard	*hz;
} __upunused;

#define SRP_HAZARD_NUM 16

struct srp_gc {
	void			(*srp_gc_dtor)(void *, void *);
	void			*srp_gc_cookie;
	struct refcnt		srp_gc_refcnt;
};

#define SRP_GC_INITIALIZER(_d, _c) { (_d), (_c), REFCNT_INITIALIZER() }
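
/*
 * A minimal usage sketch (foo_dtor, foo_ptr, and foo_gc are illustrative
 * names, not part of this API): an srp-managed pointer is paired with an
 * srp_gc whose dtor is called with the cookie and the old value once no
 * CPU can still be referencing it.
 *
 *	void		foo_dtor(void *cookie, void *oldfoo);
 *
 *	struct srp	foo_ptr = SRP_INITIALIZER();
 *	struct srp_gc	foo_gc = SRP_GC_INITIALIZER(foo_dtor, NULL);
 */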

/*
 * singly linked list built by following srps
 */

struct srpl_rc {
	void			(*srpl_ref)(void *, void *);
	struct srp_gc		srpl_gc;
};
#define srpl_cookie		srpl_gc.srp_gc_cookie

#define SRPL_RC_INITIALIZER(_r, _u, _c) { _r, SRP_GC_INITIALIZER(_u, _c) }
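
/*
 * A minimal sketch (foo_ref and foo_unref are illustrative): the first
 * callback takes a reference on an element before it becomes visible via
 * the list, the second gives that reference back once readers can no
 * longer see the element; both receive the cookie as their first argument.
 *
 *	void		foo_ref(void *cookie, void *foo);
 *	void		foo_unref(void *cookie, void *foo);
 *
 *	struct srpl_rc	foo_rc = SRPL_RC_INITIALIZER(foo_ref, foo_unref, NULL);
 */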

struct srpl {
	struct srp		sl_head;
};

#define SRPL_HEAD(name, type)		struct srpl

#define SRPL_ENTRY(type) \
struct { \
	struct srp		se_next; \
}
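
/*
 * A minimal sketch (struct foo and its members are illustrative):
 * elements embed an SRPL_ENTRY() and the list itself is declared with
 * SRPL_HEAD().
 *
 *	struct foo {
 *		int			foo_value;
 *		SRPL_ENTRY(foo)		foo_entry;
 *	};
 *
 *	SRPL_HEAD(foo_list, foo)	foo_list;
 */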

#ifdef _KERNEL

void		 srp_startup(void);
void		 srp_gc_init(struct srp_gc *, void (*)(void *, void *), void *);
void		*srp_swap_locked(struct srp *, void *);
void		 srp_update_locked(struct srp_gc *, struct srp *, void *);
void		*srp_get_locked(struct srp *);
void		 srp_gc_finalize(struct srp_gc *);

void		 srp_init(struct srp *);

#ifdef MULTIPROCESSOR
void		*srp_swap(struct srp *, void *);
void		 srp_update(struct srp_gc *, struct srp *, void *);
void		 srp_finalize(void *, const char *);
void		*srp_enter(struct srp_ref *, struct srp *);
void		*srp_follow(struct srp_ref *, struct srp *);
void		 srp_leave(struct srp_ref *);
#else /* MULTIPROCESSOR */

static inline void *
srp_enter(struct srp_ref *sr, struct srp *srp)
{
	sr->hz = NULL;
	return srp->ref;
}

#define srp_swap(_srp, _v)		srp_swap_locked((_srp), (_v))
#define srp_update(_gc, _srp, _v)	srp_update_locked((_gc), (_srp), (_v))
#define srp_finalize(_v, _wchan)	((void)0)
#define srp_follow(_sr, _srp)		srp_enter(_sr, _srp)
#define srp_leave(_sr)			do { } while (0)
#endif /* MULTIPROCESSOR */
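
/*
 * A minimal read/update sketch, reusing foo_ptr and foo_gc from above
 * (use() and newfoo are illustrative).  Readers dereference through an
 * srp_ref and the value stays valid until srp_leave(); writers publish a
 * replacement with srp_update(), which hands the old value to the gc dtor
 * once all readers are done with it.
 *
 *	struct srp_ref	 sr;
 *	struct foo	*foo;
 *
 *	foo = srp_enter(&sr, &foo_ptr);
 *	if (foo != NULL)
 *		use(foo);
 *	srp_leave(&sr);
 *
 *	srp_update(&foo_gc, &foo_ptr, newfoo);
 */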


void		 srpl_rc_init(struct srpl_rc *, void (*)(void *, void *),
		    void (*)(void *, void *), void *);

#define SRPL_INIT(_sl)			srp_init(&(_sl)->sl_head)

#define SRPL_FIRST(_sr, _sl)		srp_enter((_sr), &(_sl)->sl_head)
#define SRPL_NEXT(_sr, _e, _ENTRY)	srp_enter((_sr), &(_e)->_ENTRY.se_next)
#define SRPL_FOLLOW(_sr, _e, _ENTRY)	srp_follow((_sr), &(_e)->_ENTRY.se_next)

#define SRPL_FOREACH(_c, _sr, _sl, _ENTRY) \
	for ((_c) = SRPL_FIRST(_sr, _sl); \
	    (_c) != NULL; \
	    (_c) = SRPL_FOLLOW(_sr, _c, _ENTRY))

#define SRPL_LEAVE(_sr)			srp_leave((_sr))
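
/*
 * A minimal read-side iteration sketch (struct foo, foo_list, and match()
 * are illustrative, as above).  The element the loop stops on remains
 * valid until SRPL_LEAVE().
 *
 *	struct srp_ref	 sr;
 *	struct foo	*foo;
 *
 *	SRPL_FOREACH(foo, &sr, &foo_list, foo_entry) {
 *		if (match(foo))
 *			break;
 *	}
 *	SRPL_LEAVE(&sr);
 */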

#define SRPL_FIRST_LOCKED(_sl)		srp_get_locked(&(_sl)->sl_head)
#define SRPL_EMPTY_LOCKED(_sl)		(SRPL_FIRST_LOCKED(_sl) == NULL)

#define SRPL_NEXT_LOCKED(_e, _ENTRY) \
	srp_get_locked(&(_e)->_ENTRY.se_next)

#define SRPL_FOREACH_LOCKED(_c, _sl, _ENTRY) \
	for ((_c) = SRPL_FIRST_LOCKED(_sl); \
	    (_c) != NULL; \
	    (_c) = SRPL_NEXT_LOCKED((_c), _ENTRY))

#define SRPL_FOREACH_SAFE_LOCKED(_c, _sl, _ENTRY, _tc) \
	for ((_c) = SRPL_FIRST_LOCKED(_sl); \
	    (_c) && ((_tc) = SRPL_NEXT_LOCKED(_c, _ENTRY), 1); \
	    (_c) = (_tc))

#define SRPL_INSERT_HEAD_LOCKED(_rc, _sl, _e, _ENTRY) do { \
	void *head; \
\
	srp_init(&(_e)->_ENTRY.se_next); \
\
	head = SRPL_FIRST_LOCKED(_sl); \
	if (head != NULL) { \
		(_rc)->srpl_ref(&(_rc)->srpl_cookie, head); \
		srp_update_locked(&(_rc)->srpl_gc, \
		    &(_e)->_ENTRY.se_next, head); \
	} \
\
	(_rc)->srpl_ref(&(_rc)->srpl_cookie, _e); \
	srp_update_locked(&(_rc)->srpl_gc, &(_sl)->sl_head, (_e)); \
} while (0)

#define SRPL_INSERT_AFTER_LOCKED(_rc, _se, _e, _ENTRY) do { \
	void *next; \
\
	srp_init(&(_e)->_ENTRY.se_next); \
\
	next = SRPL_NEXT_LOCKED(_se, _ENTRY); \
	if (next != NULL) { \
		(_rc)->srpl_ref(&(_rc)->srpl_cookie, next); \
		srp_update_locked(&(_rc)->srpl_gc, \
		    &(_e)->_ENTRY.se_next, next); \
	} \
\
	(_rc)->srpl_ref(&(_rc)->srpl_cookie, _e); \
	srp_update_locked(&(_rc)->srpl_gc, \
	    &(_se)->_ENTRY.se_next, (_e)); \
} while (0)

#define SRPL_REMOVE_LOCKED(_rc, _sl, _e, _type, _ENTRY) do { \
	struct srp *ref; \
	struct _type *c, *n; \
\
	ref = &(_sl)->sl_head; \
	while ((c = srp_get_locked(ref)) != (_e)) \
		ref = &c->_ENTRY.se_next; \
\
	n = SRPL_NEXT_LOCKED(c, _ENTRY); \
	if (n != NULL) \
		(_rc)->srpl_ref(&(_rc)->srpl_cookie, n); \
	srp_update_locked(&(_rc)->srpl_gc, ref, n); \
	srp_update_locked(&(_rc)->srpl_gc, &c->_ENTRY.se_next, NULL); \
} while (0)
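
/*
 * A minimal writer-side sketch (foo_rc, foo_list, foo, and foo_entry as
 * sketched above), run under the lock that serializes updates to the
 * list:
 *
 *	SRPL_INSERT_HEAD_LOCKED(&foo_rc, &foo_list, foo, foo_entry);
 *	...
 *	SRPL_REMOVE_LOCKED(&foo_rc, &foo_list, foo, foo, foo_entry);
 */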

#endif /* _KERNEL */

#endif /* _SYS_SRP_H_ */