1 /* $NetBSD: libdm-iface.c,v 1.3 2010/12/26 14:48:34 christos Exp $ */
2
3 /*
4 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
5 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
6 *
7 * This file is part of the device-mapper userspace tools.
8 *
9 * This copyrighted material is made available to anyone wishing to use,
10 * modify, copy, or redistribute it subject to the terms and conditions
11 * of the GNU Lesser General Public License v.2.1.
12 *
13 * You should have received a copy of the GNU Lesser General Public License
14 * along with this program; if not, write to the Free Software Foundation,
15 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16 */
17
18 #include "dmlib.h"
19 #include "libdm-targets.h"
20 #include "libdm-common.h"
21
22 #ifdef DM_COMPAT
23 # include "libdm-compat.h"
24 #endif
25
26 #include <fcntl.h>
27 #include <dirent.h>
28 #include <sys/ioctl.h>
29 #include <sys/utsname.h>
30 #include <limits.h>
31
32 #ifdef linux
33 # include "kdev_t.h"
34 # include <linux/limits.h>
35 #else
36 # define MAJOR(x) major((x))
37 # define MINOR(x) minor((x))
38 # define MKDEV(x,y) makedev((x),(y))
39 #endif
40
41 #include "dm-ioctl.h"
42
43 /*
44 * Ensure build compatibility.
45 * The hard-coded versions here are the highest present
46 * in the _cmd_data arrays.
47 */
48
49 #if !((DM_VERSION_MAJOR == 1 && DM_VERSION_MINOR >= 0) || \
50 (DM_VERSION_MAJOR == 4 && DM_VERSION_MINOR >= 0))
51 #error The version of dm-ioctl.h included is incompatible.
52 #endif
53
54 /* FIXME This should be exported in device-mapper.h */
55 #define DM_NAME "device-mapper"
56
57 #define PROC_MISC "/proc/misc"
58 #define PROC_DEVICES "/proc/devices"
59 #define MISC_NAME "misc"
60
61 #define NUMBER_OF_MAJORS 4096
62
63 /* dm major version no for running kernel */
64 static unsigned _dm_version = DM_VERSION_MAJOR;
65 static unsigned _dm_version_minor = 0;
66 static unsigned _dm_version_patchlevel = 0;
67 static int _log_suppress = 0;
68
69 /*
70 * If the kernel dm driver only supports one major number
71 * we store it in _dm_device_major. Otherwise we indicate
72 * which major numbers have been claimed by device-mapper
73 * in _dm_bitset.
74 */
75 static unsigned _dm_multiple_major_support = 1;
76 static dm_bitset_t _dm_bitset = NULL;
77 static uint32_t _dm_device_major = 0;
78
79 static int _control_fd = -1;
80 static int _version_checked = 0;
81 static int _version_ok = 1;
82 static unsigned _ioctl_buffer_double_factor = 0;
83
84
85 /*
86 * Support both old and new major numbers to ease the transition.
87 * Clumsy, but only temporary.
88 */
89 #if DM_VERSION_MAJOR == 4 && defined(DM_COMPAT)
90 const int _dm_compat = 1;
91 #else
92 const int _dm_compat = 0;
93 #endif
94
95
96 /* *INDENT-OFF* */
97 static struct cmd_data _cmd_data_v4[] = {
98 {"create", DM_DEV_CREATE, {4, 0, 0}},
99 {"reload", DM_TABLE_LOAD, {4, 0, 0}},
100 {"remove", DM_DEV_REMOVE, {4, 0, 0}},
101 {"remove_all", DM_REMOVE_ALL, {4, 0, 0}},
102 {"suspend", DM_DEV_SUSPEND, {4, 0, 0}},
103 {"resume", DM_DEV_SUSPEND, {4, 0, 0}},
104 {"info", DM_DEV_STATUS, {4, 0, 0}},
105 {"deps", DM_TABLE_DEPS, {4, 0, 0}},
106 {"rename", DM_DEV_RENAME, {4, 0, 0}},
107 {"version", DM_VERSION, {4, 0, 0}},
108 {"status", DM_TABLE_STATUS, {4, 0, 0}},
109 {"table", DM_TABLE_STATUS, {4, 0, 0}},
110 {"waitevent", DM_DEV_WAIT, {4, 0, 0}},
111 {"names", DM_LIST_DEVICES, {4, 0, 0}},
112 {"clear", DM_TABLE_CLEAR, {4, 0, 0}},
113 {"mknodes", DM_DEV_STATUS, {4, 0, 0}},
114 #ifdef DM_LIST_VERSIONS
115 {"versions", DM_LIST_VERSIONS, {4, 1, 0}},
116 #endif
117 #ifdef DM_TARGET_MSG
118 {"message", DM_TARGET_MSG, {4, 2, 0}},
119 #endif
120 #ifdef DM_DEV_SET_GEOMETRY
121 {"setgeometry", DM_DEV_SET_GEOMETRY, {4, 6, 0}},
122 #endif
123 };
124 /* *INDENT-ON* */
125
126 #define ALIGNMENT_V1 sizeof(int)
127 #define ALIGNMENT 8
128
129 /* FIXME Rejig library to record & use errno instead */
130 #ifndef DM_EXISTS_FLAG
131 # define DM_EXISTS_FLAG 0x00000004
132 #endif
133
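/* Round ptr up to the next multiple of a; a must be a power of two. */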
134 static void *_align(void *ptr, unsigned int a)
135 {
136 register unsigned long agn = --a;
137
138 return (void *) (((unsigned long) ptr + agn) & ~agn);
139 }
140
141 #ifdef DM_IOCTLS
142 /*
143 * Set number to NULL to populate _dm_bitset - otherwise first
144 * match is returned.
145 */
146 static int _get_proc_number(const char *file, const char *name,
147 uint32_t *number)
148 {
149 FILE *fl;
150 char nm[256];
151 int c;
152 uint32_t num;
153
154 if (!(fl = fopen(file, "r"))) {
155 log_sys_error("fopen", file);
156 return 0;
157 }
158
159 while (!feof(fl)) {
160 if (fscanf(fl, "%u %255s\n", &num, &nm[0]) == 2) {
161 if (!strcmp(name, nm)) {
162 if (number) {
163 *number = num;
164 if (fclose(fl))
165 log_sys_error("fclose", file);
166 return 1;
167 }
168 dm_bit_set(_dm_bitset, num);
169 }
170 } else do {
171 c = fgetc(fl);
172 } while (c != EOF && c != '\n');
173 }
174 if (fclose(fl))
175 log_sys_error("fclose", file);
176
177 if (number) {
178 log_error("%s: No entry for %s found", file, name);
179 return 0;
180 }
181
182 return 1;
183 }
184
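/*
 * Look up the misc major number in /proc/devices and the device-mapper
 * minor number in /proc/misc to find the control device number.
 */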
185 static int _control_device_number(uint32_t *major, uint32_t *minor)
186 {
187 if (!_get_proc_number(PROC_DEVICES, MISC_NAME, major) ||
188 !_get_proc_number(PROC_MISC, DM_NAME, minor)) {
189 *major = 0;
190 return 0;
191 }
192
193 return 1;
194 }
195
196 /*
197 * Returns 1 if exists; 0 if it doesn't; -1 if it's wrong
198 */
199 static int _control_exists(const char *control, uint32_t major, uint32_t minor)
200 {
201 struct stat buf;
202
203 if (stat(control, &buf) < 0) {
204 if (errno != ENOENT)
205 log_sys_error("stat", control);
206 return 0;
207 }
208
209 if (!S_ISCHR(buf.st_mode)) {
210 log_verbose("%s: Wrong inode type", control);
211 if (!unlink(control))
212 return 0;
213 log_sys_error("unlink", control);
214 return -1;
215 }
216
217 if (major && buf.st_rdev != MKDEV(major, minor)) {
218 log_verbose("%s: Wrong device number: (%u, %u) instead of "
219 "(%u, %u)", control,
220 MAJOR(buf.st_rdev), MINOR(buf.st_rdev),
221 major, minor);
222 if (!unlink(control))
223 return 0;
224 log_sys_error("unlink", control);
225 return -1;
226 }
227
228 return 1;
229 }
230
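/* Create the dm device directory and the control character device node. */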
231 static int _create_control(const char *control, uint32_t major, uint32_t minor)
232 {
233 int ret;
234 mode_t old_umask;
235
236 if (!major)
237 return 0;
238
239 old_umask = umask(DM_DEV_DIR_UMASK);
240 ret = dm_create_dir(dm_dir());
241 umask(old_umask);
242
243 if (!ret)
244 return 0;
245
246 log_verbose("Creating device %s (%u, %u)", control, major, minor);
247
248 old_umask = umask(0);
249 if (mknod(control, S_IFCHR | DM_DEVICE_MODE,
250 MKDEV(major, minor)) < 0) {
251 umask(old_umask);
252 log_sys_error("mknod", control);
253 return 0;
254 }
255 umask(old_umask);
256 if (chown(control, DM_DEVICE_UID, DM_DEVICE_GID) == -1) {
257 log_sys_error("cbown", control);
258 return 0;
259 }
260
261 #ifdef HAVE_SELINUX
262 if (!dm_set_selinux_context(control, S_IFCHR)) {
263 stack;
264 return 0;
265 }
266 #endif
267
268 return 1;
269 }
270 #endif
271
272 /*
273 * FIXME Update bitset in long-running process if dm claims new major numbers.
274 */
275 static int _create_dm_bitset(void)
276 {
277 #ifdef DM_IOCTLS
278 struct utsname uts;
279
280 if (_dm_bitset || _dm_device_major)
281 return 1;
282
283 if (uname(&uts))
284 return 0;
285
286 /*
287 * 2.6 kernels are limited to one major number.
288 * Assume 2.4 kernels are patched not to.
289 * FIXME Check _dm_version and _dm_version_minor if 2.6 changes this.
290 */
291 if (!strncmp(uts.release, "2.6.", 4))
292 _dm_multiple_major_support = 0;
293
294 if (!_dm_multiple_major_support) {
295 if (!_get_proc_number(PROC_DEVICES, DM_NAME, &_dm_device_major))
296 return 0;
297 return 1;
298 }
299
300 /* Multiple major numbers supported */
301 if (!(_dm_bitset = dm_bitset_create(NULL, NUMBER_OF_MAJORS)))
302 return 0;
303
304 if (!_get_proc_number(PROC_DEVICES, DM_NAME, NULL)) {
305 dm_bitset_destroy(_dm_bitset);
306 _dm_bitset = NULL;
307 return 0;
308 }
309
310 return 1;
311 #else
312 return 0;
313 #endif
314 }
315
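/* Return 1 if the given major number is claimed by device-mapper. */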
316 int dm_is_dm_major(uint32_t major)
317 {
318 if (!_create_dm_bitset())
319 return 0;
320
321 if (_dm_multiple_major_support)
322 return dm_bit(_dm_bitset, major) ? 1 : 0;
323 else
324 return (major == _dm_device_major) ? 1 : 0;
325 }
326
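/*
 * Open the control node, creating it first if necessary, and cache the
 * file descriptor in _control_fd for subsequent ioctls.
 */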
327 static int _open_control(void)
328 {
329 #ifdef DM_IOCTLS
330 char control[PATH_MAX];
331 uint32_t major = 0, minor;
332
333 if (_control_fd != -1)
334 return 1;
335
336 snprintf(control, sizeof(control), "%s/control", dm_dir());
337
338 if (!_control_device_number(&major, &minor))
339 log_error("Is device-mapper driver missing from kernel?");
340
341 if (!_control_exists(control, major, minor) &&
342 !_create_control(control, major, minor))
343 goto error;
344
345 if ((_control_fd = open(control, O_RDWR)) < 0) {
346 log_sys_error("open", control);
347 goto error;
348 }
349
350 if (!_create_dm_bitset()) {
351 log_error("Failed to set up list of device-mapper major numbers");
352 return 0;
353 }
354
355 return 1;
356
357 error:
358 log_error("Failure to communicate with kernel device-mapper driver.");
359 return 0;
360 #else
361 return 1;
362 #endif
363 }
364
365 void dm_task_destroy(struct dm_task *dmt)
366 {
367 struct target *t, *n;
368
369 for (t = dmt->head; t; t = n) {
370 n = t->next;
371 dm_free(t->params);
372 dm_free(t->type);
373 dm_free(t);
374 }
375
376 if (dmt->dev_name)
377 dm_free(dmt->dev_name);
378
379 if (dmt->newname)
380 dm_free(dmt->newname);
381
382 if (dmt->message)
383 dm_free(dmt->message);
384
385 if (dmt->dmi.v4)
386 dm_free(dmt->dmi.v4);
387
388 if (dmt->uuid)
389 dm_free(dmt->uuid);
390
391 dm_free(dmt);
392 }
393
394 /*
395 * Protocol Version 1 compatibility functions.
396 */
397
398 #ifdef DM_COMPAT
399
400 static int _dm_task_get_driver_version_v1(struct dm_task *dmt, char *version,
401 size_t size)
402 {
403 unsigned int *v;
404
405 if (!dmt->dmi.v1) {
406 version[0] = '\0';
407 return 0;
408 }
409
410 v = dmt->dmi.v1->version;
411 snprintf(version, size, "%u.%u.%u", v[0], v[1], v[2]);
412 return 1;
413 }
414
415 /* Unmarshall the target info returned from a status call */
416 static int _unmarshal_status_v1(struct dm_task *dmt, struct dm_ioctl_v1 *dmi)
417 {
418 char *outbuf = (char *) dmi + dmi->data_start;
419 char *outptr = outbuf;
420 int32_t i;
421 struct dm_target_spec_v1 *spec;
422
423 for (i = 0; i < dmi->target_count; i++) {
424 spec = (struct dm_target_spec_v1 *) outptr;
425
426 if (!dm_task_add_target(dmt, spec->sector_start,
427 (uint64_t) spec->length,
428 spec->target_type,
429 outptr + sizeof(*spec))) {
430 return 0;
431 }
432
433 outptr = outbuf + spec->next;
434 }
435
436 return 1;
437 }
438
439 static int _dm_format_dev_v1(char *buf, int bufsize, uint32_t dev_major,
440 uint32_t dev_minor)
441 {
442 int r;
443
444 if (bufsize < 8)
445 return 0;
446
447 r = snprintf(buf, bufsize, "%03x:%03x", dev_major, dev_minor);
448 if (r < 0 || r > bufsize - 1)
449 return 0;
450
451 return 1;
452 }
453
454 static int _dm_task_get_info_v1(struct dm_task *dmt, struct dm_info *info)
455 {
456 if (!dmt->dmi.v1)
457 return 0;
458
459 memset(info, 0, sizeof(*info));
460
461 info->exists = dmt->dmi.v1->flags & DM_EXISTS_FLAG ? 1 : 0;
462 if (!info->exists)
463 return 1;
464
465 info->suspended = dmt->dmi.v1->flags & DM_SUSPEND_FLAG ? 1 : 0;
466 info->read_only = dmt->dmi.v1->flags & DM_READONLY_FLAG ? 1 : 0;
467 info->target_count = dmt->dmi.v1->target_count;
468 info->open_count = dmt->dmi.v1->open_count;
469 info->event_nr = 0;
470 info->major = MAJOR(dmt->dmi.v1->dev);
471 info->minor = MINOR(dmt->dmi.v1->dev);
472 info->live_table = 1;
473 info->inactive_table = 0;
474
475 return 1;
476 }
477
478 static const char *_dm_task_get_name_v1(const struct dm_task *dmt)
479 {
480 return (dmt->dmi.v1->name);
481 }
482
483 static const char *_dm_task_get_uuid_v1(const struct dm_task *dmt)
484 {
485 return (dmt->dmi.v1->uuid);
486 }
487
488 static struct dm_deps *_dm_task_get_deps_v1(struct dm_task *dmt)
489 {
490 log_error("deps version 1 no longer supported by libdevmapper");
491 return NULL;
492 }
493
494 static struct dm_names *_dm_task_get_names_v1(struct dm_task *dmt)
495 {
496 return (struct dm_names *) (((void *) dmt->dmi.v1) +
497 dmt->dmi.v1->data_start);
498 }
499
500 static void *_add_target_v1(struct target *t, void *out, void *end)
501 {
502 void *out_sp = out;
503 struct dm_target_spec_v1 sp;
504 size_t sp_size = sizeof(struct dm_target_spec_v1);
505 int len;
506 const char no_space[] = "Ran out of memory building ioctl parameter";
507
508 out += sp_size;
509 if (out >= end) {
510 log_error("%s", no_space);
511 return NULL;
512 }
513
514 sp.status = 0;
515 sp.sector_start = t->start;
516 sp.length = t->length;
517 strncpy(sp.target_type, t->type, sizeof(sp.target_type));
518
519 len = strlen(t->params);
520
521 if ((out + len + 1) >= end) {
522 log_error("%s", no_space);
523
524 log_error("t->params= '%s'", t->params);
525 return NULL;
526 }
527 strcpy((char *) out, t->params);
528 out += len + 1;
529
530 /* align next block */
531 out = _align(out, ALIGNMENT_V1);
532
533 sp.next = out - out_sp;
534
535 memcpy(out_sp, &sp, sp_size);
536
537 return out;
538 }
539
540 static struct dm_ioctl_v1 *_flatten_v1(struct dm_task *dmt)
541 {
542 const size_t min_size = 16 * 1024;
543 const int (*version)[3];
544
545 struct dm_ioctl_v1 *dmi;
546 struct target *t;
547 size_t len = sizeof(struct dm_ioctl_v1);
548 void *b, *e;
549 int count = 0;
550
551 for (t = dmt->head; t; t = t->next) {
552 len += sizeof(struct dm_target_spec_v1);
553 len += strlen(t->params) + 1 + ALIGNMENT_V1;
554 count++;
555 }
556
557 if (count && dmt->newname) {
558 log_error("targets and newname are incompatible");
559 return NULL;
560 }
561
562 if (dmt->newname)
563 len += strlen(dmt->newname) + 1;
564
565 /*
566 * Give len a minimum size so that we have space to store
567 * dependencies or status information.
568 */
569 if (len < min_size)
570 len = min_size;
571
572 if (!(dmi = dm_malloc(len)))
573 return NULL;
574
575 memset(dmi, 0, len);
576
577 version = &_cmd_data_v1[dmt->type].version;
578
579 dmi->version[0] = (*version)[0];
580 dmi->version[1] = (*version)[1];
581 dmi->version[2] = (*version)[2];
582
583 dmi->data_size = len;
584 dmi->data_start = sizeof(struct dm_ioctl_v1);
585
586 if (dmt->dev_name)
587 strncpy(dmi->name, dmt->dev_name, sizeof(dmi->name));
588
589 if (dmt->type == DM_DEVICE_SUSPEND)
590 dmi->flags |= DM_SUSPEND_FLAG;
591 if (dmt->read_only)
592 dmi->flags |= DM_READONLY_FLAG;
593
594 if (dmt->minor >= 0) {
595 if (dmt->major <= 0) {
596 log_error("Missing major number for persistent device");
597 goto bad;
598 }
599 dmi->flags |= DM_PERSISTENT_DEV_FLAG;
600 dmi->dev = MKDEV(dmt->major, dmt->minor);
601 }
602
603 if (dmt->uuid)
604 strncpy(dmi->uuid, dmt->uuid, sizeof(dmi->uuid));
605
606 dmi->target_count = count;
607
608 b = (void *) (dmi + 1);
609 e = (void *) ((char *) dmi + len);
610
611 for (t = dmt->head; t; t = t->next)
612 if (!(b = _add_target_v1(t, b, e)))
613 goto bad;
614
615 if (dmt->newname)
616 strcpy(b, dmt->newname);
617
618 return dmi;
619
620 bad:
621 dm_free(dmi);
622 return NULL;
623 }
624
625 static int _dm_names_v1(struct dm_ioctl_v1 *dmi)
626 {
627 const char *dev_dir = dm_dir();
628 int r = 1, len;
629 const char *name;
630 struct dirent *dirent;
631 DIR *d;
632 struct dm_names *names, *old_names = NULL;
633 void *end = (void *) dmi + dmi->data_size;
634 struct stat buf;
635 char path[PATH_MAX];
636
637 log_warn("WARNING: Device list may be incomplete with interface "
638 "version 1.");
639 log_warn("Please upgrade your kernel device-mapper driver.");
640
641 if (!(d = opendir(dev_dir))) {
642 log_sys_error("opendir", dev_dir);
643 return 0;
644 }
645
646 names = (struct dm_names *) ((void *) dmi + dmi->data_start);
647
648 names->dev = 0; /* Flags no data */
649
650 while ((dirent = readdir(d))) {
651 name = dirent->d_name;
652
653 if (name[0] == '.' || !strcmp(name, "control"))
654 continue;
655
656 if (old_names)
657 old_names->next = (uint32_t) ((void *) names -
658 (void *) old_names);
659 snprintf(path, sizeof(path), "%s/%s", dev_dir, name);
660 if (stat(path, &buf)) {
661 log_sys_error("stat", path);
662 continue;
663 }
664 if (!S_ISBLK(buf.st_mode))
665 continue;
666 names->dev = (uint64_t) buf.st_rdev;
667 names->next = 0;
668 len = strlen(name);
669 if (((void *) (names + 1) + len + 1) >= end) {
670 log_error("Insufficient buffer space for device list");
671 r = 0;
672 break;
673 }
674
675 strcpy(names->name, name);
676
677 old_names = names;
678 names = _align((void *) ++names + len + 1, ALIGNMENT);
679 }
680
681 if (closedir(d))
682 log_sys_error("closedir", dev_dir);
683
684 return r;
685 }
686
687 static int _dm_task_run_v1(struct dm_task *dmt)
688 {
689 struct dm_ioctl_v1 *dmi;
690 unsigned int command;
691
692 dmi = _flatten_v1(dmt);
693 if (!dmi) {
694 log_error("Couldn't create ioctl argument.");
695 return 0;
696 }
697
698 if (!_open_control())
699 goto bad;
700
701 if ((unsigned) dmt->type >=
702 (sizeof(_cmd_data_v1) / sizeof(*_cmd_data_v1))) {
703 log_error("Internal error: unknown device-mapper task %d",
704 dmt->type);
705 goto bad;
706 }
707
708 command = _cmd_data_v1[dmt->type].cmd;
709
710 if (dmt->type == DM_DEVICE_TABLE)
711 dmi->flags |= DM_STATUS_TABLE_FLAG;
712
713 log_debug("dm %s %s %s%s%s [%u]", _cmd_data_v1[dmt->type].name,
714 dmi->name, dmi->uuid, dmt->newname ? " " : "",
715 dmt->newname ? dmt->newname : "",
716 dmi->data_size);
717 if (dmt->type == DM_DEVICE_LIST) {
718 if (!_dm_names_v1(dmi))
719 goto bad;
720 }
721 #ifdef DM_IOCTLS
722 else if (ioctl(_control_fd, command, dmi) < 0) {
723 if (_log_suppress)
724 log_verbose("device-mapper: %s ioctl failed: %s",
725 _cmd_data_v1[dmt->type].name,
726 strerror(errno));
727 else
728 log_error("device-mapper: %s ioctl failed: %s",
729 _cmd_data_v1[dmt->type].name,
730 strerror(errno));
731 goto bad;
732 }
733 #else /* Userspace alternative for testing */
734 #endif
735
736 if (dmi->flags & DM_BUFFER_FULL_FLAG)
737 /* FIXME Increase buffer size and retry operation (if query) */
738 log_error("WARNING: libdevmapper buffer too small for data");
739
740 switch (dmt->type) {
741 case DM_DEVICE_CREATE:
742 add_dev_node(dmt->dev_name, MAJOR(dmi->dev), MINOR(dmi->dev),
743 dmt->uid, dmt->gid, dmt->mode, 0);
744 break;
745
746 case DM_DEVICE_REMOVE:
747 rm_dev_node(dmt->dev_name, 0);
748 break;
749
750 case DM_DEVICE_RENAME:
751 rename_dev_node(dmt->dev_name, dmt->newname, 0);
752 break;
753
754 case DM_DEVICE_MKNODES:
755 if (dmi->flags & DM_EXISTS_FLAG)
756 add_dev_node(dmt->dev_name, MAJOR(dmi->dev),
757 MINOR(dmi->dev), dmt->uid,
758 dmt->gid, dmt->mode, 0);
759 else
760 rm_dev_node(dmt->dev_name, 0);
761 break;
762
763 case DM_DEVICE_STATUS:
764 case DM_DEVICE_TABLE:
765 if (!_unmarshal_status_v1(dmt, dmi))
766 goto bad;
767 break;
768
769 case DM_DEVICE_SUSPEND:
770 case DM_DEVICE_RESUME:
771 dmt->type = DM_DEVICE_INFO;
772 if (!dm_task_run(dmt))
773 goto bad;
774 dm_free(dmi); /* We'll use what info returned */
775 return 1;
776 }
777
778 dmt->dmi.v1 = dmi;
779 return 1;
780
781 bad:
782 dm_free(dmi);
783 return 0;
784 }
785
786 #endif
787
788 /*
789 * Protocol Version 4 functions.
790 */
791
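/*
 * Typical caller sequence (a minimal sketch; "vg0-lv0" is a hypothetical
 * device name and error handling is omitted):
 *
 *	struct dm_task *dmt;
 *	struct dm_info info;
 *
 *	if ((dmt = dm_task_create(DM_DEVICE_INFO))) {
 *		dm_task_set_name(dmt, "vg0-lv0");
 *		if (dm_task_run(dmt))
 *			dm_task_get_info(dmt, &info);
 *		dm_task_destroy(dmt);
 *	}
 */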
792 int dm_task_get_driver_version(struct dm_task *dmt, char *version, size_t size)
793 {
794 unsigned *v;
795
796 #ifdef DM_COMPAT
797 if (_dm_version == 1)
798 return _dm_task_get_driver_version_v1(dmt, version, size);
799 #endif
800
801 if (!dmt->dmi.v4) {
802 version[0] = '\0';
803 return 0;
804 }
805
806 v = dmt->dmi.v4->version;
807 snprintf(version, size, "%u.%u.%u", v[0], v[1], v[2]);
808 _dm_version_minor = v[1];
809 _dm_version_patchlevel = v[2];
810
811 return 1;
812 }
813
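/* Run a DM_DEVICE_VERSION task to fetch the driver version string. */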
814 static int _check_version(char *version, size_t size, int log_suppress)
815 {
816 struct dm_task *task;
817 int r;
818
819 if (!(task = dm_task_create(DM_DEVICE_VERSION))) {
820 log_error("Failed to get device-mapper version");
821 version[0] = '\0';
822 return 0;
823 }
824
825 if (log_suppress)
826 _log_suppress = 1;
827
828 r = dm_task_run(task);
829 dm_task_get_driver_version(task, version, size);
830 dm_task_destroy(task);
831 _log_suppress = 0;
832
833 return r;
834 }
835
836 /*
837 * Find out device-mapper's major version number the first time
838 * this is called and whether or not we support it.
839 */
840 int dm_check_version(void)
841 {
842 char libversion[64], dmversion[64];
843 const char *compat = "";
844
845 if (_version_checked)
846 return _version_ok;
847
848 _version_checked = 1;
849
850 if (_check_version(dmversion, sizeof(dmversion), _dm_compat))
851 return 1;
852
853 if (!_dm_compat)
854 goto bad;
855
856 log_verbose("device-mapper ioctl protocol version %u failed. "
857 "Trying protocol version 1.", _dm_version);
858 _dm_version = 1;
859 if (_check_version(dmversion, sizeof(dmversion), 0)) {
860 log_verbose("Using device-mapper ioctl protocol version 1");
861 return 1;
862 }
863
864 compat = "(compat)";
865
866 dm_get_library_version(libversion, sizeof(libversion));
867
868 log_error("Incompatible libdevmapper %s%s and kernel driver %s",
869 libversion, compat, dmversion);
870
871 bad:
872 _version_ok = 0;
873 return 0;
874 }
875
876 int dm_cookie_supported(void)
877 {
878 return (dm_check_version() &&
879 _dm_version >= 4 &&
880 _dm_version_minor >= 15);
881 }
882
883 void *dm_get_next_target(struct dm_task *dmt, void *next,
884 uint64_t *start, uint64_t *length,
885 char **target_type, char **params)
886 {
887 struct target *t = (struct target *) next;
888
889 if (!t)
890 t = dmt->head;
891
892 if (!t)
893 return NULL;
894
895 *start = t->start;
896 *length = t->length;
897 *target_type = t->type;
898 *params = t->params;
899
900 return t->next;
901 }
902
903 /* Unmarshall the target info returned from a status call */
904 static int _unmarshal_status(struct dm_task *dmt, struct dm_ioctl *dmi)
905 {
906 char *outbuf = (char *) dmi + dmi->data_start;
907 char *outptr = outbuf;
908 uint32_t i;
909 struct dm_target_spec *spec;
910
911 for (i = 0; i < dmi->target_count; i++) {
912 spec = (struct dm_target_spec *) outptr;
913 if (!dm_task_add_target(dmt, spec->sector_start,
914 spec->length,
915 spec->target_type,
916 outptr + sizeof(*spec))) {
917 return 0;
918 }
919
920 outptr = outbuf + spec->next;
921 }
922
923 return 1;
924 }
925
926 int dm_format_dev(char *buf, int bufsize, uint32_t dev_major,
927 uint32_t dev_minor)
928 {
929 int r;
930
931 #ifdef DM_COMPAT
932 if (_dm_version == 1)
933 return _dm_format_dev_v1(buf, bufsize, dev_major, dev_minor);
934 #endif
935
936 if (bufsize < 8)
937 return 0;
938
939 r = snprintf(buf, (size_t) bufsize, "%u:%u", dev_major, dev_minor);
940 if (r < 0 || r > bufsize - 1)
941 return 0;
942
943 return 1;
944 }
945
946 int dm_task_get_info(struct dm_task *dmt, struct dm_info *info)
947 {
948 #ifdef DM_COMPAT
949 if (_dm_version == 1)
950 return _dm_task_get_info_v1(dmt, info);
951 #endif
952
953 if (!dmt->dmi.v4)
954 return 0;
955
956 memset(info, 0, sizeof(*info));
957
958 info->exists = dmt->dmi.v4->flags & DM_EXISTS_FLAG ? 1 : 0;
959 if (!info->exists)
960 return 1;
961
962 info->suspended = dmt->dmi.v4->flags & DM_SUSPEND_FLAG ? 1 : 0;
963 info->read_only = dmt->dmi.v4->flags & DM_READONLY_FLAG ? 1 : 0;
964 info->live_table = dmt->dmi.v4->flags & DM_ACTIVE_PRESENT_FLAG ? 1 : 0;
965 info->inactive_table = dmt->dmi.v4->flags & DM_INACTIVE_PRESENT_FLAG ?
966 1 : 0;
967 info->target_count = dmt->dmi.v4->target_count;
968 info->open_count = dmt->dmi.v4->open_count;
969 info->event_nr = dmt->dmi.v4->event_nr;
970 info->major = MAJOR(dmt->dmi.v4->dev);
971 info->minor = MINOR(dmt->dmi.v4->dev);
972
973 return 1;
974 }
975
976 uint32_t dm_task_get_read_ahead(const struct dm_task *dmt, uint32_t *read_ahead)
977 {
978 const char *dev_name;
979
980 *read_ahead = 0;
981
982 #ifdef DM_COMPAT
983 /* Not supporting this */
984 if (_dm_version == 1)
985 return 1;
986 #endif
987
988 if (!dmt->dmi.v4 || !(dmt->dmi.v4->flags & DM_EXISTS_FLAG))
989 return 0;
990
991 if (*dmt->dmi.v4->name)
992 dev_name = dmt->dmi.v4->name;
993 else if (dmt->dev_name)
994 dev_name = dmt->dev_name;
995 else {
996 log_error("Get read ahead request failed: device name unrecorded.");
997 return 0;
998 }
999
1000 return get_dev_node_read_ahead(dev_name, read_ahead);
1001 }
1002
1003 const char *dm_task_get_name(const struct dm_task *dmt)
1004 {
1005 #ifdef DM_COMPAT
1006 if (_dm_version == 1)
1007 return _dm_task_get_name_v1(dmt);
1008 #endif
1009
1010 return (dmt->dmi.v4->name);
1011 }
1012
1013 const char *dm_task_get_uuid(const struct dm_task *dmt)
1014 {
1015 #ifdef DM_COMPAT
1016 if (_dm_version == 1)
1017 return _dm_task_get_uuid_v1(dmt);
1018 #endif
1019
1020 return (dmt->dmi.v4->uuid);
1021 }
1022
1023 struct dm_deps *dm_task_get_deps(struct dm_task *dmt)
1024 {
1025 #ifdef DM_COMPAT
1026 if (_dm_version == 1)
1027 return _dm_task_get_deps_v1(dmt);
1028 #endif
1029
1030 return (struct dm_deps *) (((void *) dmt->dmi.v4) +
1031 dmt->dmi.v4->data_start);
1032 }
1033
1034 struct dm_names *dm_task_get_names(struct dm_task *dmt)
1035 {
1036 #ifdef DM_COMPAT
1037 if (_dm_version == 1)
1038 return _dm_task_get_names_v1(dmt);
1039 #endif
1040
1041 return (struct dm_names *) (((void *) dmt->dmi.v4) +
1042 dmt->dmi.v4->data_start);
1043 }
1044
1045 struct dm_versions *dm_task_get_versions(struct dm_task *dmt)
1046 {
1047 return (struct dm_versions *) (((void *) dmt->dmi.v4) +
1048 dmt->dmi.v4->data_start);
1049 }
1050
1051 int dm_task_set_ro(struct dm_task *dmt)
1052 {
1053 dmt->read_only = 1;
1054 return 1;
1055 }
1056
1057 int dm_task_set_read_ahead(struct dm_task *dmt, uint32_t read_ahead,
1058 uint32_t read_ahead_flags)
1059 {
1060 dmt->read_ahead = read_ahead;
1061 dmt->read_ahead_flags = read_ahead_flags;
1062
1063 return 1;
1064 }
1065
1066 int dm_task_suppress_identical_reload(struct dm_task *dmt)
1067 {
1068 dmt->suppress_identical_reload = 1;
1069 return 1;
1070 }
1071
1072 int dm_task_set_newname(struct dm_task *dmt, const char *newname)
1073 {
1074 if (strchr(newname, '/')) {
1075 log_error("Name \"%s\" invalid. It contains \"/\".", newname);
1076 return 0;
1077 }
1078
1079 if (strlen(newname) >= DM_NAME_LEN) {
1080 log_error("Name \"%s\" too long", newname);
1081 return 0;
1082 }
1083
1084 if (!(dmt->newname = dm_strdup(newname))) {
1085 log_error("dm_task_set_newname: strdup(%s) failed", newname);
1086 return 0;
1087 }
1088
1089 return 1;
1090 }
1091
1092 int dm_task_set_message(struct dm_task *dmt, const char *message)
1093 {
1094 if (!(dmt->message = dm_strdup(message))) {
1095 log_error("dm_task_set_message: strdup(%s) failed", message);
1096 return 0;
1097 }
1098
1099 return 1;
1100 }
1101
1102 int dm_task_set_sector(struct dm_task *dmt, uint64_t sector)
1103 {
1104 dmt->sector = sector;
1105
1106 return 1;
1107 }
1108
1109 int dm_task_set_geometry(struct dm_task *dmt, const char *cylinders, const char *heads, const char *sectors, const char *start)
1110 {
1111 size_t len = strlen(cylinders) + 1 + strlen(heads) + 1 + strlen(sectors) + 1 + strlen(start) + 1;
1112
1113 if (!(dmt->geometry = dm_malloc(len))) {
1114 log_error("dm_task_set_geometry: dm_malloc failed");
1115 return 0;
1116 }
1117
1118 if (sprintf(dmt->geometry, "%s %s %s %s", cylinders, heads, sectors, start) < 0) {
1119 log_error("dm_task_set_geometry: sprintf failed");
1120 return 0;
1121 }
1122
1123 return 1;
1124 }
1125
1126 int dm_task_no_flush(struct dm_task *dmt)
1127 {
1128 dmt->no_flush = 1;
1129
1130 return 1;
1131 }
1132
1133 int dm_task_no_open_count(struct dm_task *dmt)
1134 {
1135 dmt->no_open_count = 1;
1136
1137 return 1;
1138 }
1139
1140 int dm_task_skip_lockfs(struct dm_task *dmt)
1141 {
1142 dmt->skip_lockfs = 1;
1143
1144 return 1;
1145 }
1146
1147 int dm_task_query_inactive_table(struct dm_task *dmt)
1148 {
1149 dmt->query_inactive_table = 1;
1150
1151 return 1;
1152 }
1153
1154 int dm_task_set_event_nr(struct dm_task *dmt, uint32_t event_nr)
1155 {
1156 dmt->event_nr = event_nr;
1157
1158 return 1;
1159 }
1160
1161 struct target *create_target(uint64_t start, uint64_t len, const char *type,
1162 const char *params)
1163 {
1164 struct target *t = dm_malloc(sizeof(*t));
1165
1166 if (!t) {
1167 log_error("create_target: malloc(%" PRIsize_t ") failed",
1168 sizeof(*t));
1169 return NULL;
1170 }
1171
1172 memset(t, 0, sizeof(*t));
1173
1174 if (!(t->params = dm_strdup(params))) {
1175 log_error("create_target: strdup(params) failed");
1176 goto bad;
1177 }
1178
1179 if (!(t->type = dm_strdup(type))) {
1180 log_error("create_target: strdup(type) failed");
1181 goto bad;
1182 }
1183
1184 t->start = start;
1185 t->length = len;
1186 return t;
1187
1188 bad:
1189 dm_free(t->params);
1190 dm_free(t->type);
1191 dm_free(t);
1192 return NULL;
1193 }
1194
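/*
 * Append one dm_target_spec header plus its parameter string to the
 * ioctl buffer and return the (aligned) position for the next target,
 * or NULL if the buffer is too small.
 */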
1195 static void *_add_target(struct target *t, void *out, void *end)
1196 {
1197 void *out_sp = out;
1198 struct dm_target_spec sp;
1199 size_t sp_size = sizeof(struct dm_target_spec);
1200 int len;
1201 const char no_space[] = "Ran out of memory building ioctl parameter";
1202
1203 out += sp_size;
1204 if (out >= end) {
1205 log_error("%s", no_space);
1206 return NULL;
1207 }
1208
1209 sp.status = 0;
1210 sp.sector_start = t->start;
1211 sp.length = t->length;
1212 strncpy(sp.target_type, t->type, sizeof(sp.target_type));
1213
1214 len = strlen(t->params);
1215
1216 if ((out + len + 1) >= end) {
1217 log_error("%s", no_space);
1218
1219 log_error("t->params= '%s'", t->params);
1220 return NULL;
1221 }
1222 strcpy((char *) out, t->params);
1223 out += len + 1;
1224
1225 /* align next block */
1226 out = _align(out, ALIGNMENT);
1227
1228 sp.next = out - out_sp;
1229 memcpy(out_sp, &sp, sp_size);
1230
1231 return out;
1232 }
1233
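/* Scan DM_DEVICE_LIST output for a device number and copy out its name. */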
1234 static int _lookup_dev_name(uint64_t dev, char *buf, size_t len)
1235 {
1236 struct dm_names *names;
1237 unsigned next = 0;
1238 struct dm_task *dmt;
1239 int r = 0;
1240
1241 if (!(dmt = dm_task_create(DM_DEVICE_LIST)))
1242 return 0;
1243
1244 if (!dm_task_run(dmt))
1245 goto out;
1246
1247 if (!(names = dm_task_get_names(dmt)))
1248 goto out;
1249
1250 if (!names->dev)
1251 goto out;
1252
1253 do {
1254 names = (void *) names + next;
1255 if (names->dev == dev) {
1256 strncpy(buf, names->name, len);
1257 r = 1;
1258 break;
1259 }
1260 next = names->next;
1261 } while (next);
1262
1263 out:
1264 dm_task_destroy(dmt);
1265 return r;
1266 }
1267
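/*
 * Marshal the task into a single dm_ioctl buffer: header, optional
 * target specs, newname, message or geometry string. The buffer is
 * doubled repeat_count times when a previous attempt overflowed.
 */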
1268 static struct dm_ioctl *_flatten(struct dm_task *dmt, unsigned repeat_count)
1269 {
1270 const size_t min_size = 16 * 1024;
1271 const int (*version)[3];
1272
1273 struct dm_ioctl *dmi;
1274 struct target *t;
1275 struct dm_target_msg *tmsg;
1276 size_t len = sizeof(struct dm_ioctl);
1277 void *b, *e;
1278 int count = 0;
1279
1280 for (t = dmt->head; t; t = t->next) {
1281 len += sizeof(struct dm_target_spec);
1282 len += strlen(t->params) + 1 + ALIGNMENT;
1283 count++;
1284 }
1285
1286 if (count && (dmt->sector || dmt->message)) {
1287 log_error("targets and message are incompatible");
1288 return NULL;
1289 }
1290
1291 if (count && dmt->newname) {
1292 log_error("targets and newname are incompatible");
1293 return NULL;
1294 }
1295
1296 if (count && dmt->geometry) {
1297 log_error("targets and geometry are incompatible");
1298 return NULL;
1299 }
1300
1301 if (dmt->newname && (dmt->sector || dmt->message)) {
1302 log_error("message and newname are incompatible");
1303 return NULL;
1304 }
1305
1306 if (dmt->newname && dmt->geometry) {
1307 log_error("geometry and newname are incompatible");
1308 return NULL;
1309 }
1310
1311 if (dmt->geometry && (dmt->sector || dmt->message)) {
1312 log_error("geometry and message are incompatible");
1313 return NULL;
1314 }
1315
1316 if (dmt->sector && !dmt->message) {
1317 log_error("message is required with sector");
1318 return NULL;
1319 }
1320
1321 if (dmt->newname)
1322 len += strlen(dmt->newname) + 1;
1323
1324 if (dmt->message)
1325 len += sizeof(struct dm_target_msg) + strlen(dmt->message) + 1;
1326
1327 if (dmt->geometry)
1328 len += strlen(dmt->geometry) + 1;
1329
1330 /*
1331 * Give len a minimum size so that we have space to store
1332 * dependencies or status information.
1333 */
1334 if (len < min_size)
1335 len = min_size;
1336
1337 /* Increase buffer size if repeating because buffer was too small */
1338 while (repeat_count--)
1339 len *= 2;
1340
1341 if (!(dmi = dm_malloc(len)))
1342 return NULL;
1343
1344 memset(dmi, 0, len);
1345
1346 version = &_cmd_data_v4[dmt->type].version;
1347
1348 dmi->version[0] = (*version)[0];
1349 dmi->version[1] = (*version)[1];
1350 dmi->version[2] = (*version)[2];
1351
1352 dmi->data_size = len;
1353 dmi->data_start = sizeof(struct dm_ioctl);
1354
1355 if (dmt->minor >= 0) {
1356 if (dmt->major <= 0) {
1357 log_error("Missing major number for persistent device.");
1358 goto bad;
1359 }
1360
1361 if (!_dm_multiple_major_support && dmt->allow_default_major_fallback &&
1362 dmt->major != _dm_device_major) {
1363 log_verbose("Overriding major number of %" PRIu32
1364 " with %" PRIu32 " for persistent device.",
1365 dmt->major, _dm_device_major);
1366 dmt->major = _dm_device_major;
1367 }
1368
1369 dmi->flags |= DM_PERSISTENT_DEV_FLAG;
1370 dmi->dev = MKDEV(dmt->major, dmt->minor);
1371 }
1372
1373 /* Does driver support device number referencing? */
1374 if (_dm_version_minor < 3 && !dmt->dev_name && !dmt->uuid && dmi->dev) {
1375 if (!_lookup_dev_name(dmi->dev, dmi->name, sizeof(dmi->name))) {
1376 log_error("Unable to find name for device (%" PRIu32
1377 ":%" PRIu32 ")", dmt->major, dmt->minor);
1378 goto bad;
1379 }
1380 log_verbose("device (%" PRIu32 ":%" PRIu32 ") is %s "
1381 "for compatibility with old kernel",
1382 dmt->major, dmt->minor, dmi->name);
1383 }
1384
1385 /* FIXME Until resume ioctl supplies name, use dev_name for readahead */
1386 if (dmt->dev_name && (dmt->type != DM_DEVICE_RESUME || dmt->minor < 0 ||
1387 dmt->major < 0))
1388 strncpy(dmi->name, dmt->dev_name, sizeof(dmi->name));
1389
1390 if (dmt->uuid)
1391 strncpy(dmi->uuid, dmt->uuid, sizeof(dmi->uuid));
1392
1393 if (dmt->type == DM_DEVICE_SUSPEND)
1394 dmi->flags |= DM_SUSPEND_FLAG;
1395 if (dmt->no_flush)
1396 dmi->flags |= DM_NOFLUSH_FLAG;
1397 if (dmt->read_only)
1398 dmi->flags |= DM_READONLY_FLAG;
1399 if (dmt->skip_lockfs)
1400 dmi->flags |= DM_SKIP_LOCKFS_FLAG;
1401 if (dmt->query_inactive_table) {
1402 if (_dm_version_minor < 16)
1403 log_warn("WARNING: Inactive table query unsupported "
1404 "by kernel. It will use live table.");
1405 dmi->flags |= DM_QUERY_INACTIVE_TABLE_FLAG;
1406 }
1407
1408 dmi->target_count = count;
1409 dmi->event_nr = dmt->event_nr;
1410
1411 b = (void *) (dmi + 1);
1412 e = (void *) ((char *) dmi + len);
1413
1414 for (t = dmt->head; t; t = t->next)
1415 if (!(b = _add_target(t, b, e)))
1416 goto bad;
1417
1418 if (dmt->newname)
1419 strcpy(b, dmt->newname);
1420
1421 if (dmt->message) {
1422 tmsg = (struct dm_target_msg *) b;
1423 tmsg->sector = dmt->sector;
1424 strcpy(tmsg->message, dmt->message);
1425 }
1426
1427 if (dmt->geometry)
1428 strcpy(b, dmt->geometry);
1429
1430 return dmi;
1431
1432 bad:
1433 dm_free(dmi);
1434 return NULL;
1435 }
1436
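/* Run the task once for every node found in the dm device directory. */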
1437 static int _process_mapper_dir(struct dm_task *dmt)
1438 {
1439 struct dirent *dirent;
1440 DIR *d;
1441 const char *dir;
1442 int r = 1;
1443
1444 dir = dm_dir();
1445 if (!(d = opendir(dir))) {
1446 log_sys_error("opendir", dir);
1447 return 0;
1448 }
1449
1450 while ((dirent = readdir(d))) {
1451 if (!strcmp(dirent->d_name, ".") ||
1452 !strcmp(dirent->d_name, "..") ||
1453 !strcmp(dirent->d_name, "control"))
1454 continue;
1455 dm_task_set_name(dmt, dirent->d_name);
1456 dm_task_run(dmt);
1457 }
1458
1459 if (closedir(d))
1460 log_sys_error("closedir", dir);
1461
1462 return r;
1463 }
1464
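/* Run the task once for every device returned by DM_DEVICE_LIST. */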
1465 static int _process_all_v4(struct dm_task *dmt)
1466 {
1467 struct dm_task *task;
1468 struct dm_names *names;
1469 unsigned next = 0;
1470 int r = 1;
1471
1472 if (!(task = dm_task_create(DM_DEVICE_LIST)))
1473 return 0;
1474
1475 if (!dm_task_run(task)) {
1476 r = 0;
1477 goto out;
1478 }
1479
1480 if (!(names = dm_task_get_names(task))) {
1481 r = 0;
1482 goto out;
1483 }
1484
1485 if (!names->dev)
1486 goto out;
1487
1488 do {
1489 names = (void *) names + next;
1490 if (!dm_task_set_name(dmt, names->name)) {
1491 r = 0;
1492 goto out;
1493 }
1494 if (!dm_task_run(dmt))
1495 r = 0;
1496 next = names->next;
1497 } while (next);
1498
1499 out:
1500 dm_task_destroy(task);
1501 return r;
1502 }
1503
1504 static int _mknodes_v4(struct dm_task *dmt)
1505 {
1506 (void) _process_mapper_dir(dmt);
1507
1508 return _process_all_v4(dmt);
1509 }
1510
1511 /*
1512 * If an operation that uses a cookie fails, decrement the
1513 * semaphore instead of udev.
1514 */
1515 static int _udev_complete(struct dm_task *dmt)
1516 {
1517 uint32_t cookie;
1518
1519 if (dmt->cookie_set) {
1520 /* strip flags from the cookie and use cookie magic instead */
1521 cookie = (dmt->event_nr & ~DM_UDEV_FLAGS_MASK) |
1522 (DM_COOKIE_MAGIC << DM_UDEV_FLAGS_SHIFT);
1523 return dm_udev_complete(cookie);
1524 }
1525
1526 return 1;
1527 }
1528
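/*
 * Old-style creation with a table supplied: issue separate create and
 * table-load ioctls, then resume the device; remove it again if the
 * load or resume fails.
 */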
1529 static int _create_and_load_v4(struct dm_task *dmt)
1530 {
1531 struct dm_task *task;
1532 int r;
1533
1534 /* Use new task struct to create the device */
1535 if (!(task = dm_task_create(DM_DEVICE_CREATE))) {
1536 log_error("Failed to create device-mapper task struct");
1537 _udev_complete(dmt);
1538 return 0;
1539 }
1540
1541 /* Copy across relevant fields */
1542 if (dmt->dev_name && !dm_task_set_name(task, dmt->dev_name)) {
1543 dm_task_destroy(task);
1544 _udev_complete(dmt);
1545 return 0;
1546 }
1547
1548 if (dmt->uuid && !dm_task_set_uuid(task, dmt->uuid)) {
1549 dm_task_destroy(task);
1550 _udev_complete(dmt);
1551 return 0;
1552 }
1553
1554 task->major = dmt->major;
1555 task->minor = dmt->minor;
1556 task->uid = dmt->uid;
1557 task->gid = dmt->gid;
1558 task->mode = dmt->mode;
1559 /* FIXME: Just for udev_check in dm_task_run. Can we avoid this? */
1560 task->event_nr = dmt->event_nr & DM_UDEV_FLAGS_MASK;
1561 task->cookie_set = dmt->cookie_set;
1562
1563 r = dm_task_run(task);
1564 dm_task_destroy(task);
1565 if (!r) {
1566 _udev_complete(dmt);
1567 return 0;
1568 }
1569
1570 /* Next load the table */
1571 if (!(task = dm_task_create(DM_DEVICE_RELOAD))) {
1572 log_error("Failed to create device-mapper task struct");
1573 _udev_complete(dmt);
1574 return 0;
1575 }
1576
1577 /* Copy across relevant fields */
1578 if (dmt->dev_name && !dm_task_set_name(task, dmt->dev_name)) {
1579 dm_task_destroy(task);
1580 _udev_complete(dmt);
1581 return 0;
1582 }
1583
1584 task->read_only = dmt->read_only;
1585 task->head = dmt->head;
1586 task->tail = dmt->tail;
1587
1588 r = dm_task_run(task);
1589
1590 task->head = NULL;
1591 task->tail = NULL;
1592 dm_task_destroy(task);
1593 if (!r) {
1594 _udev_complete(dmt);
1595 goto revert;
1596 }
1597
1598 /* Use the original structure last so the info will be correct */
1599 dmt->type = DM_DEVICE_RESUME;
1600 dm_free(dmt->uuid);
1601 dmt->uuid = NULL;
1602
1603 r = dm_task_run(dmt);
1604
1605 if (r)
1606 return r;
1607
1608 revert:
1609 dmt->type = DM_DEVICE_REMOVE;
1610 dm_free(dmt->uuid);
1611 dmt->uuid = NULL;
1612 dmt->cookie_set = 0;
1613
1614 if (!dm_task_run(dmt))
1615 log_error("Failed to revert device creation.");
1616
1617 return r;
1618 }
1619
1620 uint64_t dm_task_get_existing_table_size(struct dm_task *dmt)
1621 {
1622 return dmt->existing_table_size;
1623 }
1624
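/*
 * Fetch the existing table and skip the reload if the new table is
 * identical (same targets, parameters and read-only flag).
 */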
1625 static int _reload_with_suppression_v4(struct dm_task *dmt)
1626 {
1627 struct dm_task *task;
1628 struct target *t1, *t2;
1629 int r;
1630
1631 /* New task to get existing table information */
1632 if (!(task = dm_task_create(DM_DEVICE_TABLE))) {
1633 log_error("Failed to create device-mapper task struct");
1634 return 0;
1635 }
1636
1637 /* Copy across relevant fields */
1638 if (dmt->dev_name && !dm_task_set_name(task, dmt->dev_name)) {
1639 dm_task_destroy(task);
1640 return 0;
1641 }
1642
1643 if (dmt->uuid && !dm_task_set_uuid(task, dmt->uuid)) {
1644 dm_task_destroy(task);
1645 return 0;
1646 }
1647
1648 task->major = dmt->major;
1649 task->minor = dmt->minor;
1650
1651 r = dm_task_run(task);
1652
1653 if (!r) {
1654 dm_task_destroy(task);
1655 return r;
1656 }
1657
1658 /* Store existing table size */
1659 t2 = task->head;
1660 while (t2 && t2->next)
1661 t2 = t2->next;
1662 dmt->existing_table_size = t2 ? t2->start + t2->length : 0;
1663
1664 if (((task->dmi.v4->flags & DM_READONLY_FLAG) ? 1 : 0) != dmt->read_only)
1665 goto no_match;
1666
1667 t1 = dmt->head;
1668 t2 = task->head;
1669
1670 while (t1 && t2) {
1671 while (t2->params[strlen(t2->params) - 1] == ' ')
1672 t2->params[strlen(t2->params) - 1] = '\0';
1673 if ((t1->start != t2->start) ||
1674 (t1->length != t2->length) ||
1675 (strcmp(t1->type, t2->type)) ||
1676 (strcmp(t1->params, t2->params)))
1677 goto no_match;
1678 t1 = t1->next;
1679 t2 = t2->next;
1680 }
1681
1682 if (!t1 && !t2) {
1683 dmt->dmi.v4 = task->dmi.v4;
1684 task->dmi.v4 = NULL;
1685 dm_task_destroy(task);
1686 return 1;
1687 }
1688
1689 no_match:
1690 dm_task_destroy(task);
1691
1692 /* Now do the original reload */
1693 dmt->suppress_identical_reload = 0;
1694 r = dm_task_run(dmt);
1695
1696 return r;
1697 }
1698
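/*
 * Build the ioctl payload for the task and issue the command against
 * the control file descriptor, returning the kernel-filled buffer.
 */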
1699 static struct dm_ioctl *_do_dm_ioctl(struct dm_task *dmt, unsigned command,
1700 unsigned repeat_count)
1701 {
1702 struct dm_ioctl *dmi;
1703
1704 dmi = _flatten(dmt, repeat_count);
1705 if (!dmi) {
1706 log_error("Couldn't create ioctl argument.");
1707 return NULL;
1708 }
1709
1710 if (dmt->type == DM_DEVICE_TABLE)
1711 dmi->flags |= DM_STATUS_TABLE_FLAG;
1712
1713 dmi->flags |= DM_EXISTS_FLAG; /* FIXME */
1714
1715 if (dmt->no_open_count)
1716 dmi->flags |= DM_SKIP_BDGET_FLAG;
1717
1718 /*
1719 * Prevent udev vs. libdevmapper race when processing nodes and
1720 * symlinks. This can happen when the udev rules are installed and
1721 * udev synchronisation code is enabled in libdevmapper but the
1722 * software using libdevmapper does not make use of it (by not calling
1723 * dm_task_set_cookie before). We need to instruct the udev rules not
1724 * to be applied at all in this situation so we can gracefully fallback
1725 * to libdevmapper's node and symlink creation code.
1726 */
1727 if (dm_udev_get_sync_support() && !dmt->cookie_set &&
1728 (dmt->type == DM_DEVICE_RESUME ||
1729 dmt->type == DM_DEVICE_REMOVE ||
1730 dmt->type == DM_DEVICE_RENAME)) {
1731 log_debug("Cookie value is not set while trying to call "
1732 "DM_DEVICE_RESUME, DM_DEVICE_REMOVE or DM_DEVICE_RENAME "
1733 "ioctl. Please, consider using libdevmapper's udev "
1734 "synchronisation interface or disable it explicitly "
1735 "by calling dm_udev_set_sync_support(0).");
1736 log_debug("Switching off device-mapper and all subsystem related "
1737 "udev rules. Falling back to libdevmapper node creation.");
1738 /*
1739 * Disable general dm and subsystem rules but keep dm disk rules
1740 * if not flagged out explicitly before. We need /dev/disk content
1741 * for the software that expects it.
1742 */
1743 dmi->event_nr |= (DM_UDEV_DISABLE_DM_RULES_FLAG |
1744 DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG) <<
1745 DM_UDEV_FLAGS_SHIFT;
1746 }
1747
1748 log_debug("dm %s %s %s%s%s %s%.0d%s%.0d%s"
1749 "%s%c%c%s%s %.0" PRIu64 " %s [%u]",
1750 _cmd_data_v4[dmt->type].name,
1751 dmi->name, dmi->uuid, dmt->newname ? " " : "",
1752 dmt->newname ? dmt->newname : "",
1753 dmt->major > 0 ? "(" : "",
1754 dmt->major > 0 ? dmt->major : 0,
1755 dmt->major > 0 ? ":" : "",
1756 dmt->minor > 0 ? dmt->minor : 0,
1757 dmt->major > 0 && dmt->minor == 0 ? "0" : "",
1758 dmt->major > 0 ? ") " : "",
1759 dmt->no_open_count ? 'N' : 'O',
1760 dmt->no_flush ? 'N' : 'F',
1761 dmt->skip_lockfs ? "S " : "",
1762 dmt->query_inactive_table ? "I " : "",
1763 dmt->sector, dmt->message ? dmt->message : "",
1764 dmi->data_size);
1765 #ifdef DM_IOCTLS
1766 if (ioctl(_control_fd, command, dmi) < 0) {
1767 if (errno == ENXIO && ((dmt->type == DM_DEVICE_INFO) ||
1768 (dmt->type == DM_DEVICE_MKNODES) ||
1769 (dmt->type == DM_DEVICE_STATUS)))
1770 dmi->flags &= ~DM_EXISTS_FLAG; /* FIXME */
1771 else {
1772 if (_log_suppress)
1773 log_verbose("device-mapper: %s ioctl "
1774 "failed: %s",
1775 _cmd_data_v4[dmt->type].name,
1776 strerror(errno));
1777 else
1778 log_error("device-mapper: %s ioctl "
1779 "failed: %s",
1780 _cmd_data_v4[dmt->type].name,
1781 strerror(errno));
1782 dm_free(dmi);
1783 return NULL;
1784 }
1785 }
1786 #else /* Userspace alternative for testing */
1787 #endif
1788 return dmi;
1789 }
1790
1791 void dm_task_update_nodes(void)
1792 {
1793 update_devs();
1794 }
1795
1796 int dm_task_run(struct dm_task *dmt)
1797 {
1798 struct dm_ioctl *dmi;
1799 unsigned command;
1800 int check_udev;
1801
1802 #ifdef DM_COMPAT
1803 if (_dm_version == 1)
1804 return _dm_task_run_v1(dmt);
1805 #endif
1806
1807 if ((unsigned) dmt->type >=
1808 (sizeof(_cmd_data_v4) / sizeof(*_cmd_data_v4))) {
1809 log_error("Internal error: unknown device-mapper task %d",
1810 dmt->type);
1811 return 0;
1812 }
1813
1814 command = _cmd_data_v4[dmt->type].cmd;
1815
1816 /* Old-style creation had a table supplied */
1817 if (dmt->type == DM_DEVICE_CREATE && dmt->head)
1818 return _create_and_load_v4(dmt);
1819
1820 if (dmt->type == DM_DEVICE_MKNODES && !dmt->dev_name &&
1821 !dmt->uuid && dmt->major <= 0)
1822 return _mknodes_v4(dmt);
1823
1824 if ((dmt->type == DM_DEVICE_RELOAD) && dmt->suppress_identical_reload)
1825 return _reload_with_suppression_v4(dmt);
1826
1827 if (!_open_control()) {
1828 _udev_complete(dmt);
1829 return 0;
1830 }
1831
1832 /* FIXME Detect and warn if cookie set but should not be. */
1833 repeat_ioctl:
1834 if (!(dmi = _do_dm_ioctl(dmt, command, _ioctl_buffer_double_factor))) {
1835 _udev_complete(dmt);
1836 return 0;
1837 }
1838
1839 if (dmi->flags & DM_BUFFER_FULL_FLAG) {
1840 switch (dmt->type) {
1841 case DM_DEVICE_LIST_VERSIONS:
1842 case DM_DEVICE_LIST:
1843 case DM_DEVICE_DEPS:
1844 case DM_DEVICE_STATUS:
1845 case DM_DEVICE_TABLE:
1846 case DM_DEVICE_WAITEVENT:
1847 _ioctl_buffer_double_factor++;
1848 dm_free(dmi);
1849 goto repeat_ioctl;
1850 default:
1851 log_error("WARNING: libdevmapper buffer too small for data");
1852 }
1853 }
1854
1855 check_udev = dmt->cookie_set &&
1856 !(dmt->event_nr >> DM_UDEV_FLAGS_SHIFT &
1857 DM_UDEV_DISABLE_DM_RULES_FLAG);
1858
1859 switch (dmt->type) {
1860 case DM_DEVICE_CREATE:
1861 if (dmt->dev_name && *dmt->dev_name)
1862 add_dev_node(dmt->dev_name, MAJOR(dmi->dev),
1863 MINOR(dmi->dev), dmt->uid, dmt->gid,
1864 dmt->mode, check_udev);
1865 break;
1866 case DM_DEVICE_REMOVE:
1867 /* FIXME Kernel needs to fill in dmi->name */
1868 if (dmt->dev_name)
1869 rm_dev_node(dmt->dev_name, check_udev);
1870 break;
1871
1872 case DM_DEVICE_RENAME:
1873 /* FIXME Kernel needs to fill in dmi->name */
1874 if (dmt->dev_name)
1875 rename_dev_node(dmt->dev_name, dmt->newname,
1876 check_udev);
1877 break;
1878
1879 case DM_DEVICE_RESUME:
1880 /* FIXME Kernel needs to fill in dmi->name */
1881 set_dev_node_read_ahead(dmt->dev_name, dmt->read_ahead,
1882 dmt->read_ahead_flags);
1883 break;
1884
1885 case DM_DEVICE_MKNODES:
1886 if (dmi->flags & DM_EXISTS_FLAG)
1887 add_dev_node(dmi->name, MAJOR(dmi->dev),
1888 MINOR(dmi->dev), dmt->uid,
1889 dmt->gid, dmt->mode, 0);
1890 else if (dmt->dev_name)
1891 rm_dev_node(dmt->dev_name, 0);
1892 break;
1893
1894 case DM_DEVICE_STATUS:
1895 case DM_DEVICE_TABLE:
1896 case DM_DEVICE_WAITEVENT:
1897 if (!_unmarshal_status(dmt, dmi))
1898 goto bad;
1899 break;
1900 }
1901
1902 /* Was structure reused? */
1903 if (dmt->dmi.v4)
1904 dm_free(dmt->dmi.v4);
1905 dmt->dmi.v4 = dmi;
1906 return 1;
1907
1908 bad:
1909 dm_free(dmi);
1910 return 0;
1911 }
1912
1913 void dm_lib_release(void)
1914 {
1915 if (_control_fd != -1) {
1916 close(_control_fd);
1917 _control_fd = -1;
1918 }
1919 update_devs();
1920 }
1921
1922 void dm_pools_check_leaks(void);
1923
1924 void dm_lib_exit(void)
1925 {
1926 dm_lib_release();
1927 if (_dm_bitset)
1928 dm_bitset_destroy(_dm_bitset);
1929 _dm_bitset = NULL;
1930 dm_pools_check_leaks();
1931 dm_dump_memory();
1932 _version_ok = 1;
1933 _version_checked = 0;
1934 }
1935