1 /*	$NetBSD: mirrored.c,v 1.1.1.1 2008/12/22 00:18:12 haad Exp $	*/
2 
3 /*
4  * Copyright (C) 2003-2004 Sistina Software, Inc. All rights reserved.
5  * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
6  *
7  * This file is part of LVM2.
8  *
9  * This copyrighted material is made available to anyone wishing to use,
10  * modify, copy, or redistribute it subject to the terms and conditions
11  * of the GNU Lesser General Public License v.2.1.
12  *
13  * You should have received a copy of the GNU Lesser General Public License
14  * along with this program; if not, write to the Free Software Foundation,
15  * Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
16  */
17 
18 #include "lib.h"
19 #include "toolcontext.h"
20 #include "metadata.h"
21 #include "segtype.h"
22 #include "display.h"
23 #include "text_export.h"
24 #include "text_import.h"
25 #include "config.h"
26 #include "defaults.h"
27 #include "lvm-string.h"
28 #include "targets.h"
29 #include "activate.h"
30 #include "sharedlib.h"
31 #include "str_list.h"
32 
33 #ifdef DMEVENTD
34 #  include "libdevmapper-event.h"
35 #endif
36 
37 static int _block_on_error_available = 0;
38 static unsigned _mirror_attributes = 0;
39 
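/* Activation states of a mirror segment while its device-mapper table is built. */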
40 enum {
41 	MIRR_DISABLED,
42 	MIRR_RUNNING,
43 	MIRR_COMPLETED
44 };
45 
46 struct mirror_state {
47 	uint32_t default_region_size;
48 };
49 
50 static const char *_mirrored_name(const struct lv_segment *seg)
51 {
52 	return seg->segtype->name;
53 }
54 
55 static void _mirrored_display(const struct lv_segment *seg)
56 {
57 	const char *size;
58 	uint32_t s;
59 
60 	log_print("  Mirrors\t\t%u", seg->area_count);
61 	log_print("  Mirror size\t\t%u", seg->area_len);
62 	if (seg->log_lv)
63 		log_print("  Mirror log volume\t%s", seg->log_lv->name);
64 
65 	if (seg->region_size) {
66 		size = display_size(seg->lv->vg->cmd,
67 				    (uint64_t) seg->region_size);
68 		log_print("  Mirror region size\t%s", size);
69 	}
70 
71 	log_print("  Mirror original:");
72 	display_stripe(seg, 0, "    ");
73 	log_print("  Mirror destinations:");
74 	for (s = 1; s < seg->area_count; s++)
75 		display_stripe(seg, s, "    ");
76 	log_print(" ");
77 }
78 
79 static int _mirrored_text_import_area_count(struct config_node *sn, uint32_t *area_count)
80 {
81 	if (!get_config_uint32(sn, "mirror_count", area_count)) {
82 		log_error("Couldn't read 'mirror_count' for "
83 			  "segment '%s'.", sn->key);
84 		return 0;
85 	}
86 
87 	return 1;
88 }
89 
90 static int _mirrored_text_import(struct lv_segment *seg, const struct config_node *sn,
91 			struct dm_hash_table *pv_hash)
92 {
93 	const struct config_node *cn;
94 	char *logname = NULL;
95 
96 	if (find_config_node(sn, "extents_moved")) {
97 		if (get_config_uint32(sn, "extents_moved",
98 				      &seg->extents_copied))
99 			seg->status |= PVMOVE;
100 		else {
101 			log_error("Couldn't read 'extents_moved' for "
102 				  "segment '%s'.", sn->key);
103 			return 0;
104 		}
105 	}
106 
107 	if (find_config_node(sn, "region_size")) {
108 		if (!get_config_uint32(sn, "region_size",
109 				      &seg->region_size)) {
110 			log_error("Couldn't read 'region_size' for "
111 				  "segment '%s'.", sn->key);
112 			return 0;
113 		}
114 	}
115 
116 	if ((cn = find_config_node(sn, "mirror_log"))) {
117 		if (!cn->v || !cn->v->v.str) {
118 			log_error("Mirror log type must be a string.");
119 			return 0;
120 		}
121 		logname = cn->v->v.str;
122 		if (!(seg->log_lv = find_lv(seg->lv->vg, logname))) {
123 			log_error("Unrecognised mirror log in segment %s.",
124 				  sn->key);
125 			return 0;
126 		}
127 		seg->log_lv->status |= MIRROR_LOG;
128 	}
129 
130 	if (logname && !seg->region_size) {
131 		log_error("Missing region size for mirror log for segment "
132 			  "'%s'.", sn->key);
133 		return 0;
134 	}
135 
136 	if (!(cn = find_config_node(sn, "mirrors"))) {
137 		log_error("Couldn't find mirrors array for segment "
138 			  "'%s'.", sn->key);
139 		return 0;
140 	}
141 
142 	return text_import_areas(seg, sn, cn, pv_hash, MIRROR_IMAGE);
143 }
144 
145 static int _mirrored_text_export(const struct lv_segment *seg, struct formatter *f)
146 {
147 	outf(f, "mirror_count = %u", seg->area_count);
148 	if (seg->status & PVMOVE)
149 		out_size(f, (uint64_t) seg->extents_copied * seg->lv->vg->extent_size,
150 			 "extents_moved = %" PRIu32, seg->extents_copied);
151 	if (seg->log_lv)
152 		outf(f, "mirror_log = \"%s\"", seg->log_lv->name);
153 	if (seg->region_size)
154 		outf(f, "region_size = %" PRIu32, seg->region_size);
155 
156 	return out_areas(f, seg, "mirror");
157 }
158 
159 #ifdef DEVMAPPER_SUPPORT
160 static struct mirror_state *_mirrored_init_target(struct dm_pool *mem,
161 					 struct cmd_context *cmd)
162 {
163 	struct mirror_state *mirr_state;
164 
165 	if (!(mirr_state = dm_pool_alloc(mem, sizeof(*mirr_state)))) {
166 		log_error("struct mirror_state allocation failed");
167 		return NULL;
168 	}
169 
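	/*
	 * activation/mirror_region_size is configured in KiB; doubling it
	 * converts the value to 512-byte sectors.
	 */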
170 	mirr_state->default_region_size = 2 *
171 	    find_config_tree_int(cmd,
172 			    "activation/mirror_region_size",
173 			    DEFAULT_MIRROR_REGION_SIZE);
174 
175 	return mirr_state;
176 }
177 
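/*
 * Parse the kernel mirror target status (format noted below) and accumulate
 * the synced/total region counts so the caller can report overall copy
 * progress; seg->extents_copied is updated from the same fraction.
 */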
178 static int _mirrored_target_percent(void **target_state, struct dm_pool *mem,
179 			   struct cmd_context *cmd, struct lv_segment *seg,
180 			   char *params, uint64_t *total_numerator,
181 			   uint64_t *total_denominator)
182 {
183 	struct mirror_state *mirr_state;
184 	uint64_t numerator, denominator;
185 	unsigned mirror_count, m;
186 	int used;
187 	char *pos = params;
188 
189 	if (!*target_state)
190 		*target_state = _mirrored_init_target(mem, cmd);
191 
192 	mirr_state = *target_state;
193 
194 	/* Status line: <#mirrors> (maj:min)+ <synced>/<total_regions> */
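	/*
	 * Hypothetical example for a 2-way mirror: "2 253:3 253:4 511/1024",
	 * i.e. two legs on those devices with 511 of 1024 regions in sync.
	 */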
195 	log_debug("Mirror status: %s", params);
196 
197 	if (sscanf(pos, "%u %n", &mirror_count, &used) != 1) {
198 		log_error("Failure parsing mirror status mirror count: %s",
199 			  params);
200 		return 0;
201 	}
202 	pos += used;
203 
204 	for (m = 0; m < mirror_count; m++) {
205 		if (sscanf(pos, "%*x:%*x %n", &used) != 0) {
206 			log_error("Failure parsing mirror status devices: %s",
207 				  params);
208 			return 0;
209 		}
210 		pos += used;
211 	}
212 
213 	if (sscanf(pos, "%" PRIu64 "/%" PRIu64 "%n", &numerator, &denominator,
214 		   &used) != 2) {
215 		log_error("Failure parsing mirror status fraction: %s", params);
216 		return 0;
217 	}
218 	pos += used;
219 
220 	*total_numerator += numerator;
221 	*total_denominator += denominator;
222 
223 	if (seg)
224 		seg->extents_copied = seg->area_len * numerator / denominator;
225 
226 	return 1;
227 }
228 
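/*
 * Attach the log parameters to the mirror target for this segment: a disk
 * log is referenced by the log LV's UUID, while a core log reuses the mirror
 * LV's own UUID and sets DM_CORELOG.  DM_NOSYNC and DM_BLOCK_ON_ERROR are
 * requested when applicable, but never for pvmove.
 */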
229 static int _add_log(struct dev_manager *dm, struct lv_segment *seg,
230 		    struct dm_tree_node *node, uint32_t area_count, uint32_t region_size)
231 {
232 	unsigned clustered = 0;
233 	char *log_dlid = NULL;
234 	uint32_t log_flags = 0;
235 
236 	/*
237 	 * Use clustered mirror log for non-exclusive activation
238 	 * in clustered VG.
239 	 */
240 	if ((!(seg->lv->status & ACTIVATE_EXCL) &&
241 	      (vg_is_clustered(seg->lv->vg))))
242 		clustered = 1;
243 
244 	if (seg->log_lv) {
245 		/* If disk log, use its UUID */
246 		if (!(log_dlid = build_dlid(dm, seg->log_lv->lvid.s, NULL))) {
247 			log_error("Failed to build uuid for log LV %s.",
248 				  seg->log_lv->name);
249 			return 0;
250 		}
251 	} else {
252 		/* If core log, use mirror's UUID and set DM_CORELOG flag */
253 		if (!(log_dlid = build_dlid(dm, seg->lv->lvid.s, NULL))) {
254 			log_error("Failed to build uuid for mirror LV %s.",
255 				  seg->lv->name);
256 			return 0;
257 		}
258 		log_flags |= DM_CORELOG;
259 	}
260 
261 	if (mirror_in_sync() && !(seg->status & PVMOVE))
262 		log_flags |= DM_NOSYNC;
263 
264 	if (_block_on_error_available && !(seg->status & PVMOVE))
265 		log_flags |= DM_BLOCK_ON_ERROR;
266 
267 	return dm_tree_node_add_mirror_target_log(node, region_size, clustered, log_dlid, area_count, log_flags);
268 }
269 
270 static int _mirrored_add_target_line(struct dev_manager *dm, struct dm_pool *mem,
271 				struct cmd_context *cmd, void **target_state,
272 				struct lv_segment *seg,
273 				struct dm_tree_node *node, uint64_t len,
274 				uint32_t *pvmove_mirror_count)
275 {
276 	struct mirror_state *mirr_state;
277 	uint32_t area_count = seg->area_count;
278 	unsigned start_area = 0u;
279 	int mirror_status = MIRR_RUNNING;
280 	uint32_t region_size, region_max;
281 	int r;
282 
283 	if (!*target_state)
284 		*target_state = _mirrored_init_target(mem, cmd);
285 
286 	mirr_state = *target_state;
287 
288 	/*
289 	 * A mirror segment may temporarily have only one area
290 	 * while it is under conversion.
291 	 */
292 	if (seg->area_count == 1)
293 		mirror_status = MIRR_DISABLED;
294 
295 	/*
296 	 * For pvmove, only have one mirror segment RUNNING at once.
297 	 * Segments before this are COMPLETED and use 2nd area.
298 	 * Segments after this are DISABLED and use 1st area.
299 	 */
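	/*
	 * Illustration (hypothetical layout): with three segments being moved
	 * and the first already fully copied, segment 1 is COMPLETED (linear
	 * mapping to the destination, area 1), segment 2 is the single RUNNING
	 * mirror, and segment 3 is DISABLED (linear mapping to the source,
	 * area 0).
	 */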
300 	if (seg->status & PVMOVE) {
301 		if (seg->extents_copied == seg->area_len) {
302 			mirror_status = MIRR_COMPLETED;
303 			start_area = 1;
304 		} else if ((*pvmove_mirror_count)++) {
305 			mirror_status = MIRR_DISABLED;
306 			area_count = 1;
307 		}
308 		/* else MIRR_RUNNING */
309 	}
310 
311 	if (mirror_status != MIRR_RUNNING) {
312 		if (!dm_tree_node_add_linear_target(node, len))
313 			return_0;
314 		goto done;
315 	}
316 
317 	if (!(seg->status & PVMOVE)) {
318 		if (!seg->region_size) {
319 			log_error("Missing region size for mirror segment.");
320 			return 0;
321 		}
322 		region_size = seg->region_size;
323 	} else {
324 		/* Find largest power of 2 region size unit we can use */
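		/*
		 * 1 << (ffs(x) - 1) is the lowest set bit of x, i.e. the
		 * largest power of two dividing area_len.  Illustrative
		 * numbers only: area_len = 12 extents gives ffs(12) = 3,
		 * so 4 extents, then scaled by extent_size to sectors.
		 */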
325 		region_max = (1 << (ffs((int)seg->area_len) - 1)) *
326 		      seg->lv->vg->extent_size;
327 
328 		region_size = mirr_state->default_region_size;
329 		if (region_max < region_size) {
330 			region_size = region_max;
331 			log_verbose("Using reduced mirror region size of %u sectors",
332 				    region_size);
333 		}
334 	}
335 
336 	if (!dm_tree_node_add_mirror_target(node, len))
337 		return_0;
338 
339 	if ((r = _add_log(dm, seg, node, area_count, region_size)) <= 0) {
340 		stack;
341 		return r;
342 	}
343 
344       done:
345 	return add_areas_line(dm, seg, node, start_area, area_count);
346 }
347 
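/*
 * Report whether the kernel "mirror" target is present, caching the result.
 * Also records whether block_on_error can be used and, when attributes are
 * requested, whether a clustered mirror log module is available.
 */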
348 static int _mirrored_target_present(const struct lv_segment *seg __attribute((unused)),
349 				    unsigned *attributes)
350 {
351 	static int _mirrored_checked = 0;
352 	static int _mirrored_present = 0;
353 	uint32_t maj, min, patchlevel;
354 	unsigned maj2, min2, patchlevel2;
355 	char vsn[80];
356 
357 	if (!_mirrored_checked) {
358 		_mirrored_present = target_present("mirror", 1);
359 
360 		/*
361 		 * block_on_error available with mirror target >= 1.1 and <= 1.11
362 		 * or with 1.0 in RHEL4U3 driver >= 4.5
363 		 */
364 		/* FIXME Move this into libdevmapper */
365 
366 		if (target_version("mirror", &maj, &min, &patchlevel) &&
367 		    maj == 1 &&
368 		    ((min >= 1 && min <= 11) ||
369 		     (min == 0 && driver_version(vsn, sizeof(vsn)) &&
370 		      sscanf(vsn, "%u.%u.%u", &maj2, &min2, &patchlevel2) == 3 &&
371 		      maj2 == 4 && min2 == 5 && patchlevel2 == 0)))	/* RHEL4U3 */
372 			_block_on_error_available = 1;
373 	}
374 
375 	/*
376 	 * Check only for modules if attributes requested and no previous check.
377 	 * FIXME: Fails incorrectly if cmirror was built into kernel.
378 	 */
379 	if (attributes) {
380 		if (!_mirror_attributes && module_present("log-clustered"))
381 			_mirror_attributes |= MIRROR_LOG_CLUSTERED;
382 		*attributes = _mirror_attributes;
383 	}
384 	_mirrored_checked = 1;
385 
386 	return _mirrored_present;
387 }
388 
389 #ifdef DMEVENTD
390 static int _get_mirror_dso_path(struct cmd_context *cmd, char **dso)
391 {
392 	char *path;
393 	const char *libpath;
394 
395 	if (!(path = dm_pool_alloc(cmd->mem, PATH_MAX))) {
396 		log_error("Failed to allocate dmeventd library path.");
397 		return 0;
398 	}
399 
400 	libpath = find_config_tree_str(cmd, "dmeventd/mirror_library",
401 				       DEFAULT_DMEVENTD_MIRROR_LIB);
402 
403 	get_shared_library_path(cmd, libpath, path, PATH_MAX);
404 
405 	*dso = path;
406 
407 	return 1;
408 }
409 
410 static struct dm_event_handler *_create_dm_event_handler(const char *dmname,
411 							 const char *dso,
412 							 enum dm_event_mask mask)
413 {
414 	struct dm_event_handler *dmevh;
415 
416 	if (!(dmevh = dm_event_handler_create()))
417 		return_NULL;
418 
419 	if (dm_event_handler_set_dso(dmevh, dso))
420 		goto fail;
421 
422 	if (dm_event_handler_set_dev_name(dmevh, dmname))
423 		goto fail;
424 
425 	dm_event_handler_set_event_mask(dmevh, mask);
426 	return dmevh;
427 
428 fail:
429 	dm_event_handler_destroy(dmevh);
430 	return NULL;
431 }
432 
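/*
 * Query dmeventd for an existing registration of this mirror LV with the
 * configured mirror DSO.  Returns the registered event mask (0 if not
 * monitored) and sets *pending while a (de)registration is still pending.
 */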
433 static int _target_monitored(struct lv_segment *seg, int *pending)
434 {
435 	char *dso, *name;
436 	struct logical_volume *lv;
437 	struct volume_group *vg;
438 	enum dm_event_mask evmask = 0;
439 	struct dm_event_handler *dmevh;
440 
441 	lv = seg->lv;
442 	vg = lv->vg;
443 
444 	*pending = 0;
445 	if (!_get_mirror_dso_path(vg->cmd, &dso))
446 		return_0;
447 
448 	if (!(name = build_dm_name(vg->cmd->mem, vg->name, lv->name, NULL)))
449 		return_0;
450 
451 	if (!(dmevh = _create_dm_event_handler(name, dso, DM_EVENT_ALL_ERRORS)))
452 		return_0;
453 
454 	if (dm_event_get_registered_device(dmevh, 0)) {
455 		dm_event_handler_destroy(dmevh);
456 		return 0;
457 	}
458 
459 	evmask = dm_event_handler_get_event_mask(dmevh);
460 	if (evmask & DM_EVENT_REGISTRATION_PENDING) {
461 		*pending = 1;
462 		evmask &= ~DM_EVENT_REGISTRATION_PENDING;
463 	}
464 
465 	dm_event_handler_destroy(dmevh);
466 
467 	return evmask;
468 }
469 
470 /* FIXME This gets run while suspended and performs banned operations. */
471 static int _target_set_events(struct lv_segment *seg,
472 			      int evmask __attribute((unused)), int set)
473 {
474 	char *dso, *name;
475 	struct logical_volume *lv;
476 	struct volume_group *vg;
477 	struct dm_event_handler *dmevh;
478 	int r;
479 
480 	lv = seg->lv;
481 	vg = lv->vg;
482 
483 	if (!_get_mirror_dso_path(vg->cmd, &dso))
484 		return_0;
485 
486 	if (!(name = build_dm_name(vg->cmd->mem, vg->name, lv->name, NULL)))
487 		return_0;
488 
489 	if (!(dmevh = _create_dm_event_handler(name, dso, DM_EVENT_ALL_ERRORS)))
490 		return_0;
491 
492 	r = set ? dm_event_register_handler(dmevh) : dm_event_unregister_handler(dmevh);
493 	dm_event_handler_destroy(dmevh);
494 	if (!r)
495 		return_0;
496 
497 	log_info("%s %s for events", set ? "Monitored" : "Unmonitored", name);
498 
499 	return 1;
500 }
501 
502 static int _target_monitor_events(struct lv_segment *seg, int events)
503 {
504 	return _target_set_events(seg, events, 1);
505 }
506 
507 static int _target_unmonitor_events(struct lv_segment *seg, int events)
508 {
509 	return _target_set_events(seg, events, 0);
510 }
511 
512 #endif /* DMEVENTD */
513 #endif /* DEVMAPPER_SUPPORT */
514 
515 static int _mirrored_modules_needed(struct dm_pool *mem,
516 				    const struct lv_segment *seg,
517 				    struct dm_list *modules)
518 {
519 	if (seg->log_lv &&
520 	    !list_segment_modules(mem, first_seg(seg->log_lv), modules))
521 		return_0;
522 
523 	if (vg_is_clustered(seg->lv->vg) &&
524 	    !str_list_add(mem, modules, "clog")) {
525 		log_error("cluster log string list allocation failed");
526 		return 0;
527 	}
528 
529 	if (!str_list_add(mem, modules, "mirror")) {
530 		log_error("mirror string list allocation failed");
531 		return 0;
532 	}
533 
534 	return 1;
535 }
536 
537 static void _mirrored_destroy(const struct segment_type *segtype)
538 {
539 	dm_free((void *) segtype);
540 }
541 
542 static struct segtype_handler _mirrored_ops = {
543 	.name = _mirrored_name,
544 	.display = _mirrored_display,
545 	.text_import_area_count = _mirrored_text_import_area_count,
546 	.text_import = _mirrored_text_import,
547 	.text_export = _mirrored_text_export,
548 #ifdef DEVMAPPER_SUPPORT
549 	.add_target_line = _mirrored_add_target_line,
550 	.target_percent = _mirrored_target_percent,
551 	.target_present = _mirrored_target_present,
552 #ifdef DMEVENTD
553 	.target_monitored = _target_monitored,
554 	.target_monitor_events = _target_monitor_events,
555 	.target_unmonitor_events = _target_unmonitor_events,
556 #endif
557 #endif
558 	.modules_needed = _mirrored_modules_needed,
559 	.destroy = _mirrored_destroy,
560 };
561 
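/*
 * Entry point: init_mirrored_segtype() when mirror support is built into the
 * tools (MIRRORED_INTERNAL), or the generic init_segtype() when this file is
 * built as a shared-library segment type plugin.
 */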
562 #ifdef MIRRORED_INTERNAL
563 struct segment_type *init_mirrored_segtype(struct cmd_context *cmd)
564 #else				/* Shared */
565 struct segment_type *init_segtype(struct cmd_context *cmd);
566 struct segment_type *init_segtype(struct cmd_context *cmd)
567 #endif
568 {
569 	struct segment_type *segtype = dm_malloc(sizeof(*segtype));
570 
571 	if (!segtype)
572 		return_NULL;
573 
574 	segtype->cmd = cmd;
575 	segtype->ops = &_mirrored_ops;
576 	segtype->name = "mirror";
577 	segtype->private = NULL;
578 	segtype->flags = SEG_AREAS_MIRRORED | SEG_MONITORED;
579 
580 	log_very_verbose("Initialised segtype: %s", segtype->name);
581 
582 	return segtype;
583 }
584