/*	$NetBSD: locking.c,v 1.1.1.1 2008/12/22 00:18:04 haad Exp $	*/

/*
 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include "lib.h"
#include "locking.h"
#include "locking_types.h"
#include "lvm-string.h"
#include "activate.h"
#include "toolcontext.h"
#include "memlock.h"
#include "defaults.h"
#include "lvmcache.h"

#include <assert.h>
#include <signal.h>
#include <sys/stat.h>
#include <limits.h>
#include <unistd.h>

static struct locking_type _locking;
static sigset_t _oldset;

static int _vg_lock_count = 0;		/* Number of locks held */
static int _vg_write_lock_held = 0;	/* VG write lock held? */
static int _signals_blocked = 0;

static volatile sig_atomic_t _sigint_caught = 0;
static volatile sig_atomic_t _handler_installed;
static struct sigaction _oldhandler;
static int _oldmasked;

static void _catch_sigint(int unused __attribute__((unused)))
{
	_sigint_caught = 1;
}

int sigint_caught(void)
{
	return _sigint_caught;
}

void sigint_clear(void)
{
	_sigint_caught = 0;
}

/*
 * Temporarily allow keyboard interrupts to be intercepted and noted;
 * saves interrupt handler state for sigint_restore().  Users should
 * use the sigint_caught() predicate to check whether an interrupt was
 * requested and act appropriately.  Interrupt flags are never
 * cleared automatically by this code, but the tools clear the flag
 * before running each command in lvm_run_command().  Any other place
 * that needs the flag cleared must call sigint_clear().
 */
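
/*
 * Example usage (illustrative sketch; do_one_step() and have_more_steps()
 * are hypothetical placeholders for the interruptible work):
 *
 *	sigint_allow();
 *	while (have_more_steps() && !sigint_caught())
 *		do_one_step();
 *	sigint_restore();
 *
 *	if (sigint_caught())
 *		log_error("Interrupted; aborting.");
 *
 * Nested sigint_allow() calls are permitted; only the outermost
 * sigint_restore() reinstates the previous SIGINT handler and mask.
 */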

void sigint_allow(void)
{
	struct sigaction handler;
	sigset_t sigs;

	/*
	 * Do not overwrite the backed-up handler data -
	 * just increase nesting count.
	 */
	if (_handler_installed) {
		_handler_installed++;
		return;
	}

	/* Grab old sigaction for SIGINT: shall not fail. */
	sigaction(SIGINT, NULL, &handler);
	handler.sa_flags &= ~SA_RESTART; /* Clear restart flag */
	handler.sa_handler = _catch_sigint;

	_handler_installed = 1;

	/* Override the signal handler: shall not fail. */
	sigaction(SIGINT, &handler, &_oldhandler);

	/* Unmask SIGINT.  Remember to mask it again on restore. */
	sigprocmask(0, NULL, &sigs);
	if ((_oldmasked = sigismember(&sigs, SIGINT))) {
		sigdelset(&sigs, SIGINT);
		sigprocmask(SIG_SETMASK, &sigs, NULL);
	}
}

void sigint_restore(void)
{
	if (!_handler_installed)
		return;

	if (_handler_installed > 1) {
		_handler_installed--;
		return;
	}

	/* Nesting count went down to 0. */
	_handler_installed = 0;

	if (_oldmasked) {
		sigset_t sigs;
		sigprocmask(0, NULL, &sigs);
		sigaddset(&sigs, SIGINT);
		sigprocmask(SIG_SETMASK, &sigs, NULL);
	}

	sigaction(SIGINT, &_oldhandler, NULL);
}

static void _block_signals(uint32_t flags __attribute__((unused)))
{
	sigset_t set;

	if (_signals_blocked)
		return;

	if (sigfillset(&set)) {
		log_sys_error("sigfillset", "_block_signals");
		return;
	}

	if (sigprocmask(SIG_SETMASK, &set, &_oldset)) {
		log_sys_error("sigprocmask", "_block_signals");
		return;
	}

	_signals_blocked = 1;

	return;
}

static void _unblock_signals(void)
{
	/* Don't unblock signals while any locks are held */
	if (!_signals_blocked || _vg_lock_count)
		return;

	if (sigprocmask(SIG_SETMASK, &_oldset, NULL)) {
		log_sys_error("sigprocmask", "_unblock_signals");
		return;
	}

	_signals_blocked = 0;

	return;
}

static void _lock_memory(uint32_t flags)
{
	if (!(_locking.flags & LCK_PRE_MEMLOCK))
		return;

	if ((flags & (LCK_SCOPE_MASK | LCK_TYPE_MASK)) == LCK_LV_SUSPEND)
		memlock_inc();
}

static void _unlock_memory(uint32_t flags)
{
	if (!(_locking.flags & LCK_PRE_MEMLOCK))
		return;

	if ((flags & (LCK_SCOPE_MASK | LCK_TYPE_MASK)) == LCK_LV_RESUME)
		memlock_dec();
}

void reset_locking(void)
{
	int was_locked = _vg_lock_count;

	_vg_lock_count = 0;
	_vg_write_lock_held = 0;

	_locking.reset_locking();

	if (was_locked)
		_unblock_signals();
}

static void _update_vg_lock_count(uint32_t flags)
{
	if ((flags & LCK_SCOPE_MASK) != LCK_VG)
		return;

	if ((flags & LCK_TYPE_MASK) == LCK_UNLOCK)
		_vg_lock_count--;
	else
		_vg_lock_count++;

	/* We don't bother to reset this until all VG locks are dropped */
	if ((flags & LCK_TYPE_MASK) == LCK_WRITE)
		_vg_write_lock_held = 1;
	else if (!_vg_lock_count)
		_vg_write_lock_held = 0;
}

/*
 * Select a locking type
 */
int init_locking(int type, struct cmd_context *cmd)
{
	init_lockingfailed(0);

	switch (type) {
	case 0:
		init_no_locking(&_locking, cmd);
		log_warn("WARNING: Locking disabled. Be careful! "
			  "This could corrupt your metadata.");
		return 1;

	case 1:
		log_very_verbose("File-based locking selected.");
		if (!init_file_locking(&_locking, cmd))
			break;
		return 1;

#ifdef HAVE_LIBDL
	case 2:
		if (!cmd->is_static) {
			log_very_verbose("External locking selected.");
			if (init_external_locking(&_locking, cmd))
				return 1;
		}
		if (!find_config_tree_int(cmd, "locking/fallback_to_clustered_locking",
			    find_config_tree_int(cmd, "global/fallback_to_clustered_locking",
						 DEFAULT_FALLBACK_TO_CLUSTERED_LOCKING)))
			break;
#endif

#ifdef CLUSTER_LOCKING_INTERNAL
		log_very_verbose("Falling back to internal clustered locking.");
		/* Fall through */

	case 3:
		log_very_verbose("Cluster locking selected.");
		if (!init_cluster_locking(&_locking, cmd))
			break;
		return 1;
#endif

	default:
		log_error("Unknown locking type requested.");
		return 0;
	}

	if ((type == 2 || type == 3) &&
	    find_config_tree_int(cmd, "locking/fallback_to_local_locking",
		    find_config_tree_int(cmd, "global/fallback_to_local_locking",
					 DEFAULT_FALLBACK_TO_LOCAL_LOCKING))) {
		log_warn("WARNING: Falling back to local file-based locking.");
		log_warn("Volume Groups with the clustered attribute will "
			  "be inaccessible.");
		if (init_file_locking(&_locking, cmd))
			return 1;
	}

	if (!ignorelockingfailure())
		return 0;

	/* FIXME Ensure only read ops are permitted */
	log_verbose("Locking disabled - only read operations permitted.");

	init_no_locking(&_locking, cmd);
	init_lockingfailed(1);

	return 1;
}
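
/*
 * For reference (illustrative lvm.conf fragment, not from this file): the
 * type value passed to init_locking() is normally taken from the
 * locking_type setting in the global section of lvm.conf, and the numeric
 * values map onto the cases in the switch above:
 *
 *	global {
 *		locking_type = 1	# 0 = none, 1 = local file-based,
 *					# 2 = external library, 3 = clustered
 *	}
 *
 * The fallback_to_clustered_locking and fallback_to_local_locking settings
 * read via find_config_tree_int() above control what happens when the
 * requested type cannot be initialised.
 */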

void fin_locking(void)
{
	_locking.fin_locking();
}

/*
 * Does the LVM1 driver know of this VG name?
 */
int check_lvm1_vg_inactive(struct cmd_context *cmd, const char *vgname)
{
	struct stat info;
	char path[PATH_MAX];

	/* We'll allow operations on orphans */
	if (is_orphan_vg(vgname))
		return 1;

	/* LVM1 is only present in 2.4 kernels. */
	if (strncmp(cmd->kernel_vsn, "2.4.", 4))
		return 1;

	if (dm_snprintf(path, sizeof(path), "%s/lvm/VGs/%s", cmd->proc_dir,
			 vgname) < 0) {
		log_error("LVM1 proc VG pathname too long for %s", vgname);
		return 0;
	}

	if (stat(path, &info) == 0) {
		log_error("%s exists: Is the original LVM driver using "
			  "this volume group?", path);
		return 0;
	} else if (errno != ENOENT && errno != ENOTDIR) {
		log_sys_error("stat", path);
		return 0;
	}

	return 1;
}
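
/*
 * Worked example of the check above: with the usual cmd->proc_dir of
 * "/proc" and a VG named "vg00", the path tested is /proc/lvm/VGs/vg00.
 * If that path exists, the LVM1 kernel driver still owns the VG and the
 * function refuses access.  ("/proc" and "vg00" are illustrative values.)
 */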

/*
 * VG locking is by VG name.
 * FIXME This should become VG uuid.
 */
static int _lock_vol(struct cmd_context *cmd, const char *resource, uint32_t flags)
{
	int ret = 0;

	_block_signals(flags);
	_lock_memory(flags);

	assert(resource);

	if (!*resource) {
		log_error("Internal error: Use of P_orphans is deprecated.");
		return 0;
	}

	if (*resource == '#' && (flags & LCK_CACHE)) {
		log_error("Internal error: P_%s referenced", resource);
		return 0;
	}

	if ((ret = _locking.lock_resource(cmd, resource, flags))) {
		if ((flags & LCK_SCOPE_MASK) == LCK_VG &&
		    !(flags & LCK_CACHE)) {
			if ((flags & LCK_TYPE_MASK) == LCK_UNLOCK)
				lvmcache_unlock_vgname(resource);
			else
				lvmcache_lock_vgname(resource, (flags & LCK_TYPE_MASK)
								== LCK_READ);
		}

		_update_vg_lock_count(flags);
	}

	_unlock_memory(flags);
	_unblock_signals();

	return ret;
}

int lock_vol(struct cmd_context *cmd, const char *vol, uint32_t flags)
{
	char resource[258] __attribute__((aligned(8)));

	if (flags == LCK_NONE) {
		log_debug("Internal error: %s: LCK_NONE lock requested", vol);
		return 1;
	}

	switch (flags & LCK_SCOPE_MASK) {
	case LCK_VG:
		/* Lock VG to change on-disk metadata. */
		/* If LVM1 driver knows about the VG, it can't be accessed. */
		if (!check_lvm1_vg_inactive(cmd, vol))
			return 0;
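		/* Fall through. */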
	case LCK_LV:
		/* Suspend LV if it's active. */
		strncpy(resource, vol, sizeof(resource) - 1);
		resource[sizeof(resource) - 1] = '\0';
		break;
	default:
		log_error("Unrecognised lock scope: %d",
			  flags & LCK_SCOPE_MASK);
		return 0;
	}

	if (!_lock_vol(cmd, resource, flags))
		return 0;

	/*
	 * If a real lock was acquired (i.e. not LCK_CACHE),
	 * perform an immediate unlock unless LCK_HOLD was requested.
	 */
	if (!(flags & LCK_CACHE) && !(flags & LCK_HOLD) &&
	    ((flags & LCK_TYPE_MASK) != LCK_UNLOCK)) {
		if (!_lock_vol(cmd, resource,
			       (flags & ~LCK_TYPE_MASK) | LCK_UNLOCK))
			return 0;
	}

	return 1;
}
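
/*
 * Example of the lock/unlock flow implemented above (illustrative sketch,
 * assuming the LCK_VG_WRITE and LCK_VG_UNLOCK flag combinations from
 * locking.h; "vg00" is a hypothetical VG name):
 *
 *	if (!lock_vol(cmd, "vg00", LCK_VG_WRITE))
 *		return 0;
 *	... modify and commit the VG metadata while the lock is held ...
 *	lock_vol(cmd, "vg00", LCK_VG_UNLOCK);
 *
 * A request without LCK_HOLD only verifies that the lock could be taken:
 * the lock is acquired and then released again immediately.
 */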

/* Unlock list of LVs */
int resume_lvs(struct cmd_context *cmd, struct dm_list *lvs)
{
	struct lv_list *lvl;

	dm_list_iterate_items(lvl, lvs)
		resume_lv(cmd, lvl->lv);

	return 1;
}

/* Lock a list of LVs */
int suspend_lvs(struct cmd_context *cmd, struct dm_list *lvs)
{
	struct dm_list *lvh;
	struct lv_list *lvl;

	dm_list_iterate_items(lvl, lvs) {
		if (!suspend_lv(cmd, lvl->lv)) {
			log_error("Failed to suspend %s", lvl->lv->name);
			dm_list_uniterate(lvh, lvs, &lvl->list) {
				lvl = dm_list_item(lvh, struct lv_list);
				resume_lv(cmd, lvl->lv);
			}

			return 0;
		}
	}

	return 1;
}

437 
438 /* Lock a list of LVs */
439 int activate_lvs(struct cmd_context *cmd, struct dm_list *lvs, unsigned exclusive)
440 {
441 	struct dm_list *lvh;
442 	struct lv_list *lvl;
443 
444 	dm_list_iterate_items(lvl, lvs) {
445 		if (!exclusive) {
446 			if (!activate_lv(cmd, lvl->lv)) {
447 				log_error("Failed to activate %s", lvl->lv->name);
448 				return 0;
449 			}
450 		} else if (!activate_lv_excl(cmd, lvl->lv)) {
451 			log_error("Failed to activate %s", lvl->lv->name);
452 			dm_list_uniterate(lvh, lvs, &lvl->list) {
453 				lvl = dm_list_item(lvh, struct lv_list);
454 				activate_lv(cmd, lvl->lv);
455 			}
456 			return 0;
457 		}
458 	}
459 
460 	return 1;
461 }
462 
463 int vg_write_lock_held(void)
464 {
465 	return _vg_write_lock_held;
466 }
467 
468 int locking_is_clustered(void)
469 {
470 	return (_locking.flags & LCK_CLUSTERED) ? 1 : 0;
471 }
472 
473