1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License, Version 1.0 only
6 * (the "License"). You may not use this file except in compliance
7 * with the License.
8 *
9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 * or http://www.opensolaris.org/os/licensing.
11 * See the License for the specific language governing permissions
12 * and limitations under the License.
13 *
14 * When distributing Covered Code, include this CDDL HEADER in each
15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
19 *
20 * CDDL HEADER END
21 */
22 /*
23 * Copyright (c) 2001 by Sun Microsystems, Inc.
24 * All rights reserved.
25 */
26
27 #pragma ident "%Z%%M% %I% %E% SMI"
28
29 /*
30 * Doors-daemon (dsvclockd) synchronization strategy: contacts a standalone
31 * daemon to coordinate access to the shared resource across multiple
32 * processes and multiple threads within a process. Performance is slow
 * (about 1200 locks and unlocks per second on an Ultra 170E/167 MHz) but it
 * provides robust locks and scales well as the number of CPUs increases.
35 */
36
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <fcntl.h>
#include <unistd.h>
#include <poll.h>
#include <dsvclockd.h>
#include <door.h>
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <stdio.h>
#include <dhcp_svc_private.h>
49
50 static int dsvcd_lock(dsvc_synch_t *, dsvcd_locktype_t, void **);
51
/*
 * Our synchronization-private data which hangs off of sp->s_data; This
 * data is thus per-open-container-instance and (of course) per-process.
 */
typedef struct {
	int		s_lockfd;	/* lock request door fd; -1 if no lock manager */
	boolean_t	s_crosshost;	/* request crosshost synch (DSVC_SYNCH_CROSSHOST) */
} dsvcd_synch_t;
60
/*
 * Initialize the dsvclockd synchronization strategy for an open container,
 * whose synchronization information ("synchronization instance") is
 * pointed to by `sp', by opening the door to the dsvclockd.  If no live
 * daemon is found behind the door, fork/exec a new one and retry.  On
 * success, hang our synchronization-private data off of `sp->s_data'.
 * Returns a DSVC_* code.
 */
static int
dsvcd_init(dsvc_synch_t *sp, unsigned int synchflags)
{
	dsvcd_synch_t	*dsp;
	char		doorpath[MAXPATHLEN];
	door_info_t	info;
	unsigned int	tries;
	pid_t		dsvclockd_pid;
	int		fd;

	/*
	 * Only the superuser may use the lock daemon.
	 */
	if (geteuid() != 0)
		return (DSVC_ACCESS);

	dsp = malloc(sizeof (dsvcd_synch_t));
	sp->s_data = dsp;
	if (dsp == NULL)
		return (DSVC_NO_MEMORY);

	/*
	 * There is one door per datastore resource, named per
	 * DSVCD_DOOR_FMT.
	 */
	(void) snprintf(doorpath, MAXPATHLEN, DSVCD_DOOR_FMT,
	    sp->s_datastore->d_resource);

	dsp->s_lockfd = -1;
	dsp->s_crosshost = (synchflags & DSVC_SYNCH_CROSSHOST) != 0;

	/*
	 * First try to use an already-running daemon: the door must open
	 * and have a live server attached (di_target != -1).
	 */
	fd = open(doorpath, O_RDONLY);
	if (fd == -1) {
		if (errno == EACCES) {
			free(dsp);
			sp->s_data = NULL;
			return (DSVC_ACCESS);
		}
	} else {
		if (door_info(fd, &info) == 0 && info.di_target != -1) {
			dsp->s_lockfd = fd;
			return (DSVC_SUCCESS);
		}
		/* stale door with no server behind it; start a daemon */
		(void) close(fd);
	}

	switch (dsvclockd_pid = fork()) {
	case -1:
		break;			/* fork failed; give up below */
	case 0:
		/*
		 * Close all descriptors so messages don't leak through.
		 */
		(void) closefrom(0);

		/*
		 * It's okay if the exec fails; the `default' case below
		 * will give up and return DSVC_NO_LOCKMGR.
		 */
		(void) execl(DSVCD_PATH, DSVCD_PATH, (char *)0);
		_exit(EXIT_FAILURE);
	default:
		/*
		 * Make five attempts to open the dsvclockd door, each
		 * spaced a half second apart.
		 */
		for (tries = 0; tries < 5; tries++) {
			fd = open(doorpath, O_RDONLY);
			if (fd != -1) {
				if (door_info(fd, &info) == 0 &&
				    info.di_target != -1) {
					/*
					 * Reap the forked child; the daemon
					 * presumably detaches itself --
					 * TODO: confirm dsvclockd daemonizes.
					 */
					(void) waitpid(dsvclockd_pid, NULL, 0);
					dsp->s_lockfd = fd;
					return (DSVC_SUCCESS);
				}
				(void) close(fd);
			}
			/* poll() with no fds is just a 500ms sleep */
			(void) poll(NULL, 0, 500);
		}
		(void) waitpid(dsvclockd_pid, NULL, 0);
		break;
	}

	free(dsp);
	sp->s_data = NULL;
	return (DSVC_NO_LOCKMGR);
}
148
149 /*
150 * Finish using the dsvclockd synchronization strategy on synchronization
151 * instance `sp'.
152 */
153 static void
dsvcd_fini(dsvc_synch_t * sp)154 dsvcd_fini(dsvc_synch_t *sp)
155 {
156 dsvcd_synch_t *dsp = sp->s_data;
157
158 sp->s_data = NULL;
159 (void) close(dsp->s_lockfd);
160 free(dsp);
161 }
162
163 /*
164 * Obtain a shared lock on synchronization instance `sp'. Upon success,
165 * `unlock_cookiep' is set to a token to pass to `dsvcd_unlock' to unlock
166 * the lock. Returns a DSVC_* code.
167 */
168 static int
dsvcd_rdlock(dsvc_synch_t * sp,void ** unlock_cookiep)169 dsvcd_rdlock(dsvc_synch_t *sp, void **unlock_cookiep)
170 {
171 return (dsvcd_lock(sp, DSVCD_RDLOCK, unlock_cookiep));
172 }
173
174 /*
175 * Obtain an exclusive lock on synchronization instance `sp'. Upon
176 * success, `unlock_cookiep' is set to a token to pass to `dsvcd_unlock' to
177 * unlock the lock. Returns a DSVC_* code.
178 */
179 static int
dsvcd_wrlock(dsvc_synch_t * sp,void ** unlock_cookiep)180 dsvcd_wrlock(dsvc_synch_t *sp, void **unlock_cookiep)
181 {
182 return (dsvcd_lock(sp, DSVCD_WRLOCK, unlock_cookiep));
183 }
184
185 /*
186 * Lock the synchronization instance `sp' with a lock of type `locktype'.
187 * Upon success, `unlock_cookiep' is set to point to a door descriptor
188 * which is used to unlock the lock and to detect if the caller dies
189 * holding the lock. Returns a DSVC_* code.
190 */
191 static int
dsvcd_lock(dsvc_synch_t * sp,dsvcd_locktype_t locktype,void ** unlock_cookiep)192 dsvcd_lock(dsvc_synch_t *sp, dsvcd_locktype_t locktype, void **unlock_cookiep)
193 {
194 door_arg_t args;
195 dsvcd_lock_request_t request;
196 dsvcd_reply_t reply;
197 door_desc_t *descp;
198 int unlockfd;
199 int i;
200 dsvcd_synch_t *dsp = sp->s_data;
201
202 if (dsp->s_lockfd == -1)
203 return (DSVC_NO_LOCKMGR);
204
205 request.lrq_request.rq_version = DSVCD_DOOR_VERSION;
206 request.lrq_request.rq_reqtype = DSVCD_LOCK;
207 request.lrq_locktype = locktype;
208 request.lrq_nonblock = sp->s_nonblock;
209 request.lrq_crosshost = dsp->s_crosshost;
210 request.lrq_conver = sp->s_datastore->d_conver;
211
212 (void) strlcpy(request.lrq_loctoken, sp->s_loctoken,
213 sizeof (request.lrq_loctoken));
214 (void) strlcpy(request.lrq_conname, sp->s_conname,
215 sizeof (request.lrq_conname));
216
217 args.data_ptr = (char *)&request;
218 args.data_size = sizeof (dsvcd_lock_request_t);
219 args.desc_ptr = NULL;
220 args.desc_num = 0;
221 args.rbuf = (char *)&reply;
222 args.rsize = sizeof (dsvcd_reply_t);
223
224 if (door_call(dsp->s_lockfd, &args) == -1) {
225 /*
226 * If the lock manager went away, we'll get back EBADF.
227 */
228 return (errno == EBADF ? DSVC_NO_LOCKMGR : DSVC_SYNCH_ERR);
229 }
230
231 descp = args.desc_ptr;
232 if (args.desc_num == 0)
233 unlockfd = -1;
234 else {
235 unlockfd = descp->d_data.d_desc.d_descriptor;
236
237 /*
238 * There shouldn't be more than one descriptor, but close
239 * any extras to ease future compatibility.
240 */
241 for (i = 1; i < args.desc_num; i++)
242 (void) close(descp[i].d_data.d_desc.d_descriptor);
243 }
244
245 if (args.rbuf != (char *)&reply) {
246 (void) memcpy(&reply, args.rbuf, sizeof (reply));
247 (void) munmap(args.rbuf, args.rsize);
248 }
249
250 if (args.data_size != sizeof (dsvcd_reply_t) ||
251 reply.rp_version != DSVCD_DOOR_VERSION) {
252 (void) close(unlockfd);
253 return (DSVC_SYNCH_ERR);
254 }
255
256 if (reply.rp_retval == DSVC_SUCCESS && unlockfd == -1)
257 return (DSVC_SYNCH_ERR);
258
259 *unlock_cookiep = (void *)unlockfd;
260 return (reply.rp_retval);
261 }
262
263 /*
264 * Unlock the synchronization instance `sp' using the unlock token
265 * `unlock_cookiep'. Returns a DSVC_* code.
266 */
267 /* ARGSUSED */
268 static int
dsvcd_unlock(dsvc_synch_t * sp,void * unlock_cookie)269 dsvcd_unlock(dsvc_synch_t *sp, void *unlock_cookie)
270 {
271 door_arg_t args;
272 dsvcd_unlock_request_t request;
273 dsvcd_reply_t reply;
274 int unlockfd = (int)unlock_cookie;
275 int i;
276
277 request.urq_request.rq_version = DSVCD_DOOR_VERSION;
278 request.urq_request.rq_reqtype = DSVCD_UNLOCK;
279
280 args.data_ptr = (char *)&request;
281 args.data_size = sizeof (dsvcd_unlock_request_t);
282 args.desc_ptr = NULL;
283 args.desc_num = 0;
284 args.rbuf = (char *)&reply;
285 args.rsize = sizeof (dsvcd_reply_t);
286
287 if (door_call(unlockfd, &args) == -1) {
288 /*
289 * If the lock manager went away while we had a lock
290 * checked out, regard that as a synchronization error --
291 * it should never happen under correct operation.
292 */
293 return (DSVC_SYNCH_ERR);
294 }
295
296 /*
297 * There shouldn't be any descriptors returned from the server
298 * here, but this may change in the future -- close any to ease
299 * future compatibility.
300 */
301 for (i = 0; i < args.desc_num; i++)
302 (void) close(args.desc_ptr[i].d_data.d_desc.d_descriptor);
303
304 /*
305 * Close the unlock door even if the door_call() fails; this is so
306 * the container gets unlocked even if there's some screwup in the
307 * graceful unlocking protocol (in that case, this will generate
308 * a DOOR_UNREF_DATA call).
309 */
310 (void) close(unlockfd);
311
312 if (args.rbuf != (char *)&reply) {
313 (void) memcpy(&reply, args.rbuf, sizeof (reply));
314 (void) munmap(args.rbuf, args.rsize);
315 }
316
317 if (args.data_size != sizeof (dsvcd_reply_t) ||
318 reply.rp_version != DSVCD_DOOR_VERSION)
319 return (DSVC_SYNCH_ERR);
320
321 return (reply.rp_retval);
322 }
323
/*
 * Ops vector exporting this strategy's entry points to the private
 * layer (see dhcp_svc_private.h for the dsvc_synch_ops_t contract).
 */
dsvc_synch_ops_t dsvcd_synch_ops = {
	dsvcd_init, dsvcd_fini, dsvcd_rdlock, dsvcd_wrlock, dsvcd_unlock
};
327