/*	$NetBSD: lvm-functions.c,v 1.1.1.2 2009/02/18 11:16:40 haad Exp $	*/

/*
 * Copyright (C) 2002-2004 Sistina Software, Inc. All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#define _GNU_SOURCE
#define _FILE_OFFSET_BITS 64

#include <configure.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/utsname.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <fcntl.h>
#include <string.h>
#include <stddef.h>
#include <unistd.h>
#include <errno.h>
#include <syslog.h>
#include <assert.h>
#include <libdevmapper.h>
#include <libdlm.h>

#include "lvm-types.h"
#include "clvm.h"
#include "clvmd-comms.h"
#include "clvmd.h"
#include "lvm-functions.h"

/* LVM2 headers */
#include "toolcontext.h"
#include "lvmcache.h"
#include "lvm-logging.h"
#include "lvm-globals.h"
#include "activate.h"
#include "locking.h"
#include "archiver.h"
#include "defaults.h"

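/*
 * Daemon-wide state: 'cmd' is the single LVM command context shared by all
 * worker threads (serialised by lvm_lock); 'lv_hash' maps lock resource names
 * to lv_info entries (protected by lv_hash_lock); 'last_error' holds the most
 * recent LVM error text for return to clients; 'suspended' counts LVs
 * currently suspended via LCK_LV_SUSPEND.
 */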
static struct cmd_context *cmd = NULL;
static struct dm_hash_table *lv_hash = NULL;
static pthread_mutex_t lv_hash_lock;
static pthread_mutex_t lvm_lock;
static char last_error[1024];
static int suspended = 0;

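/* Per-LV lock bookkeeping: the lock id returned by sync_lock() and the
   mode (LKM_*) the lock is currently held at. */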
struct lv_info {
	int lock_id;
	int lock_mode;
};

#define LCK_MASK (LCK_TYPE_MASK | LCK_SCOPE_MASK)

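/* Describe a locking command byte in human-readable form for debug logging.
   Uses a static buffer, so the result is only valid until the next call. */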
static const char *decode_locking_cmd(unsigned char cmdl)
{
	static char buf[128];
	const char *type;
	const char *scope;
	const char *command;

	switch (cmdl & LCK_TYPE_MASK) {
	case LCK_NULL:
		type = "NULL";
		break;
	case LCK_READ:
		type = "READ";
		break;
	case LCK_PREAD:
		type = "PREAD";
		break;
	case LCK_WRITE:
		type = "WRITE";
		break;
	case LCK_EXCL:
		type = "EXCL";
		break;
	case LCK_UNLOCK:
		type = "UNLOCK";
		break;
	default:
		type = "unknown";
		break;
	}

	switch (cmdl & LCK_SCOPE_MASK) {
	case LCK_VG:
		scope = "VG";
		break;
	case LCK_LV:
		scope = "LV";
		break;
	default:
		scope = "unknown";
		break;
	}

	switch (cmdl & LCK_MASK) {
	case LCK_LV_EXCLUSIVE & LCK_MASK:
		command = "LCK_LV_EXCLUSIVE";
		break;
	case LCK_LV_SUSPEND & LCK_MASK:
		command = "LCK_LV_SUSPEND";
		break;
	case LCK_LV_RESUME & LCK_MASK:
		command = "LCK_LV_RESUME";
		break;
	case LCK_LV_ACTIVATE & LCK_MASK:
		command = "LCK_LV_ACTIVATE";
		break;
	case LCK_LV_DEACTIVATE & LCK_MASK:
		command = "LCK_LV_DEACTIVATE";
		break;
	default:
		command = "unknown";
		break;
	}

	sprintf(buf, "0x%x %s (%s|%s%s%s%s%s%s)", cmdl, command, type, scope,
		cmdl & LCK_NONBLOCK   ? "|NONBLOCK" : "",
		cmdl & LCK_HOLD       ? "|HOLD" : "",
		cmdl & LCK_LOCAL      ? "|LOCAL" : "",
		cmdl & LCK_CLUSTER_VG ? "|CLUSTER_VG" : "",
		cmdl & LCK_CACHE      ? "|CACHE" : "");

	return buf;
}

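/* Describe the lock_flags byte for debug logging; static buffer as above. */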
static const char *decode_flags(unsigned char flags)
{
	static char buf[128];

	sprintf(buf, "0x%x (%s%s)", flags,
		flags & LCK_MIRROR_NOSYNC_MODE	  ? "MIRROR_NOSYNC " : "",
		flags & LCK_DMEVENTD_MONITOR_MODE ? "DMEVENTD_MONITOR " : "");

	return buf;
}

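/* Return the most recent error message captured by lvm2_log_fn() below,
   so it can be passed back to the requesting client. */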
char *get_last_lvm_error()
{
	return last_error;
}

/* Return the mode a lock is currently held at (or -1 if not held) */
static int get_current_lock(char *resource)
{
	struct lv_info *lvi;

	pthread_mutex_lock(&lv_hash_lock);
	lvi = dm_hash_lookup(lv_hash, resource);
	pthread_mutex_unlock(&lv_hash_lock);
	if (lvi) {
		return lvi->lock_mode;
	} else {
		return -1;
	}
}

/* Called at shutdown to tidy the lockspace */
void unlock_all()
{
	struct dm_hash_node *v;

	pthread_mutex_lock(&lv_hash_lock);
	dm_hash_iterate(v, lv_hash) {
		struct lv_info *lvi = dm_hash_get_data(lv_hash, v);

		sync_unlock(dm_hash_get_key(lv_hash, v), lvi->lock_id);
	}
	pthread_mutex_unlock(&lv_hash_lock);
}

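/* hold_lock()/hold_unlock() wrap the cluster interface's sync_lock() and
   sync_unlock(); 'mode' and 'flags' follow libdlm conventions (LKM_* lock
   modes, LKF_* flags), as noted in the return-code comment further down. */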
/* Gets a real lock and keeps the info in the hash table */
int hold_lock(char *resource, int mode, int flags)
{
	int status;
	int saved_errno;
	struct lv_info *lvi;

	flags &= LKF_NOQUEUE;	/* Only LKF_NOQUEUE is valid here */

	pthread_mutex_lock(&lv_hash_lock);
	lvi = dm_hash_lookup(lv_hash, resource);
	pthread_mutex_unlock(&lv_hash_lock);
	if (lvi) {
		/* Already exists - convert it */
		status =
		    sync_lock(resource, mode, LKF_CONVERT | flags,
			      &lvi->lock_id);
		saved_errno = errno;
		if (!status)
			lvi->lock_mode = mode;

		if (status) {
			DEBUGLOG("hold_lock. convert to %d failed: %s\n", mode,
				 strerror(errno));
		}
		errno = saved_errno;
	} else {
		lvi = malloc(sizeof(struct lv_info));
		if (!lvi)
			return -1;

		lvi->lock_mode = mode;
		status = sync_lock(resource, mode, flags, &lvi->lock_id);
		saved_errno = errno;
		if (status) {
			free(lvi);
			DEBUGLOG("hold_lock. lock at %d failed: %s\n", mode,
				 strerror(errno));
		} else {
			pthread_mutex_lock(&lv_hash_lock);
			dm_hash_insert(lv_hash, resource, lvi);
			pthread_mutex_unlock(&lv_hash_lock);
		}
		errno = saved_errno;
	}
	return status;
}

/* Unlock and remove it from the hash table */
int hold_unlock(char *resource)
{
	struct lv_info *lvi;
	int status;
	int saved_errno;

	pthread_mutex_lock(&lv_hash_lock);
	lvi = dm_hash_lookup(lv_hash, resource);
	pthread_mutex_unlock(&lv_hash_lock);
	if (!lvi) {
		DEBUGLOG("hold_unlock, lock not already held\n");
		return 0;
	}

	status = sync_unlock(resource, lvi->lock_id);
	saved_errno = errno;
	if (!status) {
		pthread_mutex_lock(&lv_hash_lock);
		dm_hash_remove(lv_hash, resource);
		pthread_mutex_unlock(&lv_hash_lock);
		free(lvi);
	} else {
		DEBUGLOG("hold_unlock. unlock failed(%d): %s\n", status,
			 strerror(errno));
	}

	errno = saved_errno;
	return status;
}

/* Watch the return codes here.
   liblvm API functions return 1(true) for success, 0(false) for failure and don't set errno.
   libdlm API functions return 0 for success, -1 for failure and do set errno.
   These functions here return 0 for success or >0 for failure (where the retcode is errno)
*/

/* Activate LV exclusive or non-exclusive */
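/* 'mode' is a cluster (DLM-style) lock mode: LKM_CRMODE requests a shared
   (concurrent-read) activation, LKM_EXMODE an exclusive one. */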
static int do_activate_lv(char *resource, unsigned char lock_flags, int mode)
{
	int oldmode;
	int status;
	int activate_lv;
	int exclusive = 0;
	struct lvinfo lvi;

	/* Is it already open ? */
	oldmode = get_current_lock(resource);
	if (oldmode == mode) {
		return 0;	/* Nothing to do */
	}

	/* Does the config file want us to activate this LV ? */
	if (!lv_activation_filter(cmd, resource, &activate_lv))
		return EIO;

	if (!activate_lv)
		return 0;	/* Success, we did nothing! */

	/* Do we need to activate exclusively? */
	if ((activate_lv == 2) || (mode == LKM_EXMODE)) {
		exclusive = 1;
		mode = LKM_EXMODE;
	}

	/* Try to get the lock if it's a clustered volume group */
	if (lock_flags & LCK_CLUSTER_VG) {
		status = hold_lock(resource, mode, LKF_NOQUEUE);
		if (status) {
			/* Return an LVM-sensible error for this.
			 * Forcing EIO makes the upper level return this text
			 * rather than the strerror text for EAGAIN.
			 */
			if (errno == EAGAIN) {
				sprintf(last_error, "Volume is busy on another node");
				errno = EIO;
			}
			return errno;
		}
	}

	/* If it's suspended then resume it */
	if (!lv_info_by_lvid(cmd, resource, &lvi, 0, 0))
		return EIO;

	if (lvi.suspended)
		if (!lv_resume(cmd, resource))
			return EIO;

	/* Now activate it */
	if (!lv_activate(cmd, resource, exclusive))
		return EIO;

	return 0;
}

/* Resume the LV if it was active */
static int do_resume_lv(char *resource)
{
	int oldmode;

	/* Is it open ? */
	oldmode = get_current_lock(resource);
	if (oldmode == -1) {
		DEBUGLOG("do_resume_lv, lock not already held\n");
		return 0;	/* We don't need to do anything */
	}

	if (!lv_resume_if_active(cmd, resource))
		return EIO;

	return 0;
}

/* Suspend the device if active */
static int do_suspend_lv(char *resource)
{
	int oldmode;
	struct lvinfo lvi;

	/* Is it open ? */
	oldmode = get_current_lock(resource);
	if (oldmode == -1) {
		DEBUGLOG("do_suspend_lv, lock not already held\n");
		return 0; /* Not active, so it's OK */
	}

	/* Only suspend it if it exists */
	if (!lv_info_by_lvid(cmd, resource, &lvi, 0, 0))
		return EIO;

	if (lvi.exists) {
		if (!lv_suspend_if_active(cmd, resource)) {
			return EIO;
		}
	}
	return 0;
}

static int do_deactivate_lv(char *resource, unsigned char lock_flags)
{
	int oldmode;
	int status;

	/* Is it open ? */
	oldmode = get_current_lock(resource);
	if (oldmode == -1 && (lock_flags & LCK_CLUSTER_VG)) {
		DEBUGLOG("do_deactivate_lv, lock not already held\n");
		return 0;	/* We don't need to do anything */
	}

	if (!lv_deactivate(cmd, resource))
		return EIO;

	if (lock_flags & LCK_CLUSTER_VG) {
		status = hold_unlock(resource);
		if (status)
			return errno;
	}

	return 0;
}

/* This is the LOCK_LV part that happens on all nodes in the cluster -
   it is responsible for the interaction with device-mapper and LVM */
int do_lock_lv(unsigned char command, unsigned char lock_flags, char *resource)
{
	int status = 0;

	DEBUGLOG("do_lock_lv: resource '%s', cmd = %s, flags = %s\n",
		 resource, decode_locking_cmd(command), decode_flags(lock_flags));

	pthread_mutex_lock(&lvm_lock);
	if (!cmd->config_valid || config_files_changed(cmd)) {
		/* Reinitialise various settings inc. logging, filters */
		if (do_refresh_cache()) {
			log_error("Updated config file invalid. Aborting.");
			pthread_mutex_unlock(&lvm_lock);
			return EINVAL;
		}
	}

	if (lock_flags & LCK_MIRROR_NOSYNC_MODE)
		init_mirror_in_sync(1);

	if (!(lock_flags & LCK_DMEVENTD_MONITOR_MODE))
		init_dmeventd_monitor(0);

	switch (command) {
	case LCK_LV_EXCLUSIVE:
		status = do_activate_lv(resource, lock_flags, LKM_EXMODE);
		break;

	case LCK_LV_SUSPEND:
		status = do_suspend_lv(resource);
		if (!status)
			suspended++;
		break;

	case LCK_UNLOCK:
	case LCK_LV_RESUME:	/* if active */
		status = do_resume_lv(resource);
		if (!status)
			suspended--;
		break;

	case LCK_LV_ACTIVATE:
		status = do_activate_lv(resource, lock_flags, LKM_CRMODE);
		break;

	case LCK_LV_DEACTIVATE:
		status = do_deactivate_lv(resource, lock_flags);
		break;

	default:
		DEBUGLOG("Invalid LV command 0x%x\n", command);
		status = EINVAL;
		break;
	}

	if (lock_flags & LCK_MIRROR_NOSYNC_MODE)
		init_mirror_in_sync(0);

	if (!(lock_flags & LCK_DMEVENTD_MONITOR_MODE))
		init_dmeventd_monitor(DEFAULT_DMEVENTD_MONITOR);

	/* clean the pool for another command */
	dm_pool_empty(cmd->mem);
	pthread_mutex_unlock(&lvm_lock);

	DEBUGLOG("Command return is %d\n", status);
	return status;
}

/* Functions to do on the local node only BEFORE the cluster-wide stuff above happens */
int pre_lock_lv(unsigned char command, unsigned char lock_flags, char *resource)
{
	/* Nearly all the work happens cluster-wide; SUSPEND is the exception.
	   Here we take the lock out on this node (because we are the node
	   modifying the metadata) before suspending cluster-wide.
	 */
	if (command == LCK_LV_SUSPEND) {
		DEBUGLOG("pre_lock_lv: resource '%s', cmd = %s, flags = %s\n",
			 resource, decode_locking_cmd(command), decode_flags(lock_flags));

		if (hold_lock(resource, LKM_PWMODE, LKF_NOQUEUE))
			return errno;
	}
	return 0;
}

/* Functions to do on the local node only AFTER the cluster-wide stuff above happens */
int post_lock_lv(unsigned char command, unsigned char lock_flags,
		 char *resource)
{
	int status;

	/* Opposite of above, done on resume after a metadata update */
	if (command == LCK_LV_RESUME) {
		int oldmode;

		DEBUGLOG("post_lock_lv: resource '%s', cmd = %s, flags = %s\n",
			 resource, decode_locking_cmd(command), decode_flags(lock_flags));

		/* If the lock state is PW then restore it to what it was */
		oldmode = get_current_lock(resource);
		if (oldmode == LKM_PWMODE) {
			struct lvinfo lvi;

			pthread_mutex_lock(&lvm_lock);
			status = lv_info_by_lvid(cmd, resource, &lvi, 0, 0);
			pthread_mutex_unlock(&lvm_lock);
			if (!status)
				return EIO;

			if (lvi.exists) {
				if (hold_lock(resource, LKM_CRMODE, 0))
					return errno;
			} else {
				if (hold_unlock(resource))
					return errno;
			}
		}
	}
	return 0;
}

/* Check if a VG is in use by LVM1 so we don't stomp on it */
int do_check_lvm1(const char *vgname)
{
	int status;

	status = check_lvm1_vg_inactive(cmd, vgname);

	return status == 1 ? 0 : EBUSY;
}

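/* Re-read the configuration and rescan device labels so the daemon picks up
   any changes. Returns 0 on success, -1 if the toolcontext refresh failed. */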
int do_refresh_cache()
{
	int ret;
	DEBUGLOG("Refreshing context\n");
	log_notice("Refreshing context");

	ret = refresh_toolcontext(cmd);
	init_full_scan_done(0);
	lvmcache_label_scan(cmd, 2);

	return ret == 1 ? 0 : -1;
}


/* Only called at gulm startup. Drop any leftover VG or P_orphan locks
   that might be hanging around if we died for any reason
*/
static void drop_vg_locks()
{
	char vg[128];
	char line[255];
	FILE *vgs =
	    popen
	    ("lvm pvs  --config 'log{command_names=0 prefix=\"\"}' --nolocking --noheadings -o vg_name", "r");

	sync_unlock("P_" VG_ORPHANS, LCK_EXCL);
	sync_unlock("P_" VG_GLOBAL, LCK_EXCL);

	if (!vgs)
		return;

	while (fgets(line, sizeof(line), vgs)) {
		char *vgend;
		char *vgstart;

		if (line[strlen(line)-1] == '\n')
			line[strlen(line)-1] = '\0';

		vgstart = line + strspn(line, " ");
		vgend = vgstart + strcspn(vgstart, " ");
		*vgend = '\0';

		if (strncmp(vgstart, "WARNING:", 8) == 0)
			continue;

		sprintf(vg, "V_%s", vgstart);
		sync_unlock(vg, LCK_EXCL);

	}
	if (pclose(vgs))
		DEBUGLOG("vgs pclose failed: %s\n", strerror(errno));
}

/*
 * Drop lvmcache metadata
 */
void drop_metadata(const char *vgname)
{
	DEBUGLOG("Dropping metadata for VG %s\n", vgname);
	pthread_mutex_lock(&lvm_lock);
	lvmcache_drop_metadata(vgname);
	pthread_mutex_unlock(&lvm_lock);
}

/*
 * Ideally, clvmd should be started before any LVs are active
 * but this may not be the case...
 * I suppose this also comes in handy if clvmd crashes, not that it would!
 */
static void *get_initial_state()
{
	char lv[64], vg[64], flags[25], vg_flags[25];
	char uuid[65];
	char line[255];
	FILE *lvs =
	    popen
	    ("lvm lvs  --config 'log{command_names=0 prefix=\"\"}' --nolocking --noheadings -o vg_uuid,lv_uuid,lv_attr,vg_attr",
	     "r");

	if (!lvs)
		return NULL;

	while (fgets(line, sizeof(line), lvs)) {
		if (sscanf(line, "%s %s %s %s\n", vg, lv, flags, vg_flags) == 4) {

			/* States: s:suspended a:active S:dropped snapshot I:invalid snapshot */
			if (strlen(vg) == 38 &&                         /* is it a valid UUID ? */
			    (flags[4] == 'a' || flags[4] == 's') &&	/* is it active or suspended? */
			    vg_flags[5] == 'c') {			/* is it clustered ? */
				/* Convert hyphen-separated UUIDs into one */
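				/* The lock resource name is the VG UUID followed by
				   the LV UUID with the hyphens stripped out:
				   32 + 32 characters plus the terminating NUL. */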
				memcpy(&uuid[0], &vg[0], 6);
				memcpy(&uuid[6], &vg[7], 4);
				memcpy(&uuid[10], &vg[12], 4);
				memcpy(&uuid[14], &vg[17], 4);
				memcpy(&uuid[18], &vg[22], 4);
				memcpy(&uuid[22], &vg[27], 4);
				memcpy(&uuid[26], &vg[32], 6);
				memcpy(&uuid[32], &lv[0], 6);
				memcpy(&uuid[38], &lv[7], 4);
				memcpy(&uuid[42], &lv[12], 4);
				memcpy(&uuid[46], &lv[17], 4);
				memcpy(&uuid[50], &lv[22], 4);
				memcpy(&uuid[54], &lv[27], 4);
				memcpy(&uuid[58], &lv[32], 6);
				uuid[64] = '\0';

				DEBUGLOG("getting initial lock for %s\n", uuid);
				hold_lock(uuid, LKM_CRMODE, LKF_NOQUEUE);
			}
		}
	}
	if (pclose(lvs))
		DEBUGLOG("lvs pclose failed: %s\n", strerror(errno));
	return NULL;
}

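/* Log handler registered via init_log_fn(): passes messages on to the normal
   LVM2 logging and keeps the most recent error for get_last_lvm_error(). */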
static void lvm2_log_fn(int level, const char *file, int line,
			const char *message)
{

	/* Send messages to the normal LVM2 logging system too,
	   so we get debug output when it's asked for.
	   We need to NULL the function ptr otherwise it will just call
	   back into here! */
	init_log_fn(NULL);
	print_log(level, file, line, "%s", message);
	init_log_fn(lvm2_log_fn);

	/*
	 * Ignore non-error messages, but store the latest one for returning
	 * to the user.
	 */
	if (level != _LOG_ERR && level != _LOG_FATAL)
		return;

	strncpy(last_error, message, sizeof(last_error));
	last_error[sizeof(last_error)-1] = '\0';
}

/* This checks some basic cluster-LVM configuration stuff */
static void check_config()
{
	int locking_type;

	locking_type = find_config_tree_int(cmd, "global/locking_type", 1);

	if (locking_type == 3) /* compiled-in cluster support */
		return;

	if (locking_type == 2) { /* External library, check name */
		const char *libname;

		libname = find_config_tree_str(cmd, "global/locking_library",
					       "");
		if (strstr(libname, "liblvm2clusterlock.so"))
			return;

		log_error("Incorrect LVM locking library specified in lvm.conf, cluster operations may not work.");
		return;
	}
	log_error("locking_type not set correctly in lvm.conf, cluster operations will not work.");
}

void init_lvhash()
{
	/* Create hash table for keeping LV locks & status */
	lv_hash = dm_hash_create(100);
	pthread_mutex_init(&lv_hash_lock, NULL);
	pthread_mutex_init(&lvm_lock, NULL);
}

/* Backs up the LVM metadata if it has changed */
void lvm_do_backup(const char *vgname)
{
	struct volume_group *vg;
	int consistent = 0;

	DEBUGLOG("Triggering backup of VG metadata for %s. suspended=%d\n", vgname, suspended);

	vg = vg_read(cmd, vgname, NULL /*vgid*/, &consistent);
	if (vg) {
		if (consistent)
			check_current_backup(vg);
	}
	else {
		log_error("Error backing up metadata, can't find VG for group %s", vgname);
	}
}

/* Called to initialise the LVM context of the daemon */
int init_lvm(int using_gulm)
{
	if (!(cmd = create_toolcontext(1))) {
		log_error("Failed to allocate command context");
		return 0;
	}

	/* Use LOG_DAEMON for syslog messages instead of LOG_USER */
	init_syslog(LOG_DAEMON);
	openlog("clvmd", LOG_PID, LOG_DAEMON);
	cmd->cmd_line = (char *)"clvmd";

	/* Check lvm.conf is set up for cluster-LVM */
	check_config();

	/* Remove any non-LV locks that may have been left around */
	if (using_gulm)
		drop_vg_locks();

	get_initial_state();

	/* Trap log messages so we can pass them back to the user */
	init_log_fn(lvm2_log_fn);

	return 1;
}
753